/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2016 Broadcom Corporation.
 *      Copyright (C) 2016-2017 Broadcom Ltd.
 *      Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *      refers to Broadcom Inc. and/or its subsidiaries.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/gso.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
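/* Example (illustrative): tg3_flag(tp, ENABLE_APE) tests the
 * TG3_FLAG_ENABLE_APE bit in tp->tg3_flags; tg3_flag_set() and
 * tg3_flag_clear() flip the same bit through the atomic bitops above.
 */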

#define DRV_MODULE_NAME         "tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     137

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     ETH_ZLEN
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
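/* NEXT_TX() is the '& (foo - 1)' idiom mentioned above: TG3_TX_RING_SIZE
 * is a power of two, so ANDing with (size - 1) wraps the index without a
 * hardware modulo, e.g. NEXT_TX(511) == (512 & 511) == 0.
 */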

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
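/* e.g. with the default tx_pending of TG3_DEF_TX_RING_PENDING (511),
 * the wakeup threshold works out to 511 / 4 = 127 free descriptors.
 */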
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

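/* Indirect register access: the register offset is loaded into the
 * TG3PCI_REG_BASE_ADDR window in PCI config space and the value then
 * moves through TG3PCI_REG_DATA.  indirect_lock serializes the two
 * config cycles so concurrent users cannot interleave them.
 */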
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

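/* Write a register and read it back; the read forces the posted PCI
 * write out to the device before the caller proceeds.
 */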
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

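/* Mailbox writes in indirect mode: the two producer-index mailboxes
 * below have dedicated config-space shadow registers; everything else
 * goes through the indirect window at the mailbox offset plus 0x5600.
 */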
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

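/* NIC SRAM is reached through a movable memory window: the window base
 * selects the word, TG3PCI_MEM_WIN_DATA carries the data, and the base
 * is parked at zero again afterwards.
 */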
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver isn't holding any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

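/* APE locks arbitrate shared resources between the driver and the APE
 * firmware: a request bit is written to the REQ register and the lock
 * is held once the GRANT register reads back exactly that bit.  For
 * the non-PHY locks, PCI function 0 uses the generic driver bit and
 * the other functions use their own per-function bit.
 */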
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                fallthrough;
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                fallthrough;
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 20 milliseconds for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 20000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
                                   unsigned long interval)
{
        /* Check if the heartbeat interval has elapsed */
        if (!tg3_flag(tp, ENABLE_APE) ||
            time_before(jiffies, tp->ape_hb_jiffies + interval))
                return;

        tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
        tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

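/* Each vector's interrupt mailbox is re-armed by writing the last seen
 * status tag (shifted into bits 31:24), as tg3_enable_ints() below does;
 * writing 0x00000001, as tg3_disable_ints() above does, masks the vector
 * instead.
 */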
static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

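/* MII access goes through the MAC_MI_COM register: one frame encodes the
 * PHY address, register number and command, and completion is detected by
 * polling MI_COM_BUSY.  Hardware autopolling is paused around the
 * transaction and restored afterwards.
 */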
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

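/* Clause 45 MMD registers are reached indirectly through clause 22:
 * select the MMD device in MII_TG3_MMD_CTRL, write the register address
 * to MII_TG3_MMD_ADDRESS, switch to no-increment data mode, then move
 * the data through the same address register.
 */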
1235 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1236 {
1237         int err;
1238
1239         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1240         if (err)
1241                 goto done;
1242
1243         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1244         if (err)
1245                 goto done;
1246
1247         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1248                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1249         if (err)
1250                 goto done;
1251
1252         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1253
1254 done:
1255         return err;
1256 }
1257
1258 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1259 {
1260         int err;
1261
1262         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1263         if (err)
1264                 goto done;
1265
1266         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1267         if (err)
1268                 goto done;
1269
1270         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1271                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1272         if (err)
1273                 goto done;
1274
1275         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1276
1277 done:
1278         return err;
1279 }
1280
1281 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1282 {
1283         int err;
1284
1285         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1286         if (!err)
1287                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1288
1289         return err;
1290 }
1291
1292 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1293 {
1294         int err;
1295
1296         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1297         if (!err)
1298                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1299
1300         return err;
1301 }
1302
1303 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1304 {
1305         int err;
1306
1307         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1308                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1309                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1310         if (!err)
1311                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1312
1313         return err;
1314 }
1315
1316 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1317 {
1318         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1319                 set |= MII_TG3_AUXCTL_MISC_WREN;
1320
1321         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1322 }
1323
1324 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1325 {
1326         u32 val;
1327         int err;
1328
1329         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1330
1331         if (err)
1332                 return err;
1333
1334         if (enable)
1335                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1336         else
1337                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1338
1339         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1340                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1341
1342         return err;
1343 }
1344
1345 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1346 {
1347         return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1348                             reg | val | MII_TG3_MISC_SHDW_WREN);
1349 }
1350
1351 static int tg3_bmcr_reset(struct tg3 *tp)
1352 {
1353         u32 phy_control;
1354         int limit, err;
1355
1356         /* OK, reset it, and poll the BMCR_RESET bit until it
1357          * clears or we time out.
1358          */
1359         phy_control = BMCR_RESET;
1360         err = tg3_writephy(tp, MII_BMCR, phy_control);
1361         if (err != 0)
1362                 return -EBUSY;
1363
1364         limit = 5000;
1365         while (limit--) {
1366                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1367                 if (err != 0)
1368                         return -EBUSY;
1369
1370                 if ((phy_control & BMCR_RESET) == 0) {
1371                         udelay(40);
1372                         break;
1373                 }
1374                 udelay(10);
1375         }
1376         if (limit < 0)
1377                 return -EBUSY;
1378
1379         return 0;
1380 }
1381
1382 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1383 {
1384         struct tg3 *tp = bp->priv;
1385         u32 val;
1386
1387         spin_lock_bh(&tp->lock);
1388
1389         if (__tg3_readphy(tp, mii_id, reg, &val))
1390                 val = -EIO;
1391
1392         spin_unlock_bh(&tp->lock);
1393
1394         return val;
1395 }
1396
1397 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1398 {
1399         struct tg3 *tp = bp->priv;
1400         u32 ret = 0;
1401
1402         spin_lock_bh(&tp->lock);
1403
1404         if (__tg3_writephy(tp, mii_id, reg, val))
1405                 ret = -EIO;
1406
1407         spin_unlock_bh(&tp->lock);
1408
1409         return ret;
1410 }
1411
1412 static void tg3_mdio_config_5785(struct tg3 *tp)
1413 {
1414         u32 val;
1415         struct phy_device *phydev;
1416
1417         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1418         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1419         case PHY_ID_BCM50610:
1420         case PHY_ID_BCM50610M:
1421                 val = MAC_PHYCFG2_50610_LED_MODES;
1422                 break;
1423         case PHY_ID_BCMAC131:
1424                 val = MAC_PHYCFG2_AC131_LED_MODES;
1425                 break;
1426         case PHY_ID_RTL8211C:
1427                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1428                 break;
1429         case PHY_ID_RTL8201E:
1430                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1431                 break;
1432         default:
1433                 return;
1434         }
1435
1436         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1437                 tw32(MAC_PHYCFG2, val);
1438
1439                 val = tr32(MAC_PHYCFG1);
1440                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1441                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1442                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1443                 tw32(MAC_PHYCFG1, val);
1444
1445                 return;
1446         }
1447
1448         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1449                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1450                        MAC_PHYCFG2_FMODE_MASK_MASK |
1451                        MAC_PHYCFG2_GMODE_MASK_MASK |
1452                        MAC_PHYCFG2_ACT_MASK_MASK   |
1453                        MAC_PHYCFG2_QUAL_MASK_MASK |
1454                        MAC_PHYCFG2_INBAND_ENABLE;
1455
1456         tw32(MAC_PHYCFG2, val);
1457
1458         val = tr32(MAC_PHYCFG1);
1459         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1460                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1461         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1462                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1463                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1464                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1465                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1466         }
1467         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1468                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1469         tw32(MAC_PHYCFG1, val);
1470
1471         val = tr32(MAC_EXT_RGMII_MODE);
1472         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1473                  MAC_RGMII_MODE_RX_QUALITY |
1474                  MAC_RGMII_MODE_RX_ACTIVITY |
1475                  MAC_RGMII_MODE_RX_ENG_DET |
1476                  MAC_RGMII_MODE_TX_ENABLE |
1477                  MAC_RGMII_MODE_TX_LOWPWR |
1478                  MAC_RGMII_MODE_TX_RESET);
1479         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1480                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1481                         val |= MAC_RGMII_MODE_RX_INT_B |
1482                                MAC_RGMII_MODE_RX_QUALITY |
1483                                MAC_RGMII_MODE_RX_ACTIVITY |
1484                                MAC_RGMII_MODE_RX_ENG_DET;
1485                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1486                         val |= MAC_RGMII_MODE_TX_ENABLE |
1487                                MAC_RGMII_MODE_TX_LOWPWR |
1488                                MAC_RGMII_MODE_TX_RESET;
1489         }
1490         tw32(MAC_EXT_RGMII_MODE, val);
1491 }
1492
1493 static void tg3_mdio_start(struct tg3 *tp)
1494 {
1495         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1496         tw32_f(MAC_MI_MODE, tp->mi_mode);
1497         udelay(80);
1498
1499         if (tg3_flag(tp, MDIOBUS_INITED) &&
1500             tg3_asic_rev(tp) == ASIC_REV_5785)
1501                 tg3_mdio_config_5785(tp);
1502 }
1503
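/* Work out the PHY address for this port, take the MDIO interface out
 * of auto-poll mode and, when phylib is in use, allocate and register
 * the MDIO bus.  On 5717+ devices the address is derived from the PCI
 * function number, with an offset of 7 for SerDes-strapped ports.
 */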
1504 static int tg3_mdio_init(struct tg3 *tp)
1505 {
1506         int i;
1507         u32 reg;
1508         struct phy_device *phydev;
1509
1510         if (tg3_flag(tp, 5717_PLUS)) {
1511                 u32 is_serdes;
1512
1513                 tp->phy_addr = tp->pci_fn + 1;
1514
1515                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1516                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1517                 else
1518                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1519                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1520                 if (is_serdes)
1521                         tp->phy_addr += 7;
1522         } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1523                 int addr;
1524
1525                 addr = ssb_gige_get_phyaddr(tp->pdev);
1526                 if (addr < 0)
1527                         return addr;
1528                 tp->phy_addr = addr;
1529         } else
1530                 tp->phy_addr = TG3_PHY_MII_ADDR;
1531
1532         tg3_mdio_start(tp);
1533
1534         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1535                 return 0;
1536
1537         tp->mdio_bus = mdiobus_alloc();
1538         if (tp->mdio_bus == NULL)
1539                 return -ENOMEM;
1540
1541         tp->mdio_bus->name     = "tg3 mdio bus";
1542         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
1543         tp->mdio_bus->priv     = tp;
1544         tp->mdio_bus->parent   = &tp->pdev->dev;
1545         tp->mdio_bus->read     = &tg3_mdio_read;
1546         tp->mdio_bus->write    = &tg3_mdio_write;
1547         tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1548
1549         /* The bus registration will look for all the PHYs on the mdio bus.
1550          * Unfortunately, it does not ensure the PHY is powered up before
1551          * accessing the PHY ID registers.  A chip reset is the
1552          * quickest way to bring the device back to an operational state.
1553          */
1554         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1555                 tg3_bmcr_reset(tp);
1556
1557         i = mdiobus_register(tp->mdio_bus);
1558         if (i) {
1559                 dev_warn(&tp->pdev->dev, "mdiobus_register failed (0x%x)\n", i);
1560                 mdiobus_free(tp->mdio_bus);
1561                 return i;
1562         }
1563
1564         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1565
1566         if (!phydev || !phydev->drv) {
1567                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1568                 mdiobus_unregister(tp->mdio_bus);
1569                 mdiobus_free(tp->mdio_bus);
1570                 return -ENODEV;
1571         }
1572
1573         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1574         case PHY_ID_BCM57780:
1575                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1576                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1577                 break;
1578         case PHY_ID_BCM50610:
1579         case PHY_ID_BCM50610M:
1580                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1581                                      PHY_BRCM_RX_REFCLK_UNUSED |
1582                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1583                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1584                 fallthrough;
1585         case PHY_ID_RTL8211C:
1586                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1587                 break;
1588         case PHY_ID_RTL8201E:
1589         case PHY_ID_BCMAC131:
1590                 phydev->interface = PHY_INTERFACE_MODE_MII;
1591                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1592                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1593                 break;
1594         }
1595
1596         tg3_flag_set(tp, MDIOBUS_INITED);
1597
1598         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1599                 tg3_mdio_config_5785(tp);
1600
1601         return 0;
1602 }
1603
1604 static void tg3_mdio_fini(struct tg3 *tp)
1605 {
1606         if (tg3_flag(tp, MDIOBUS_INITED)) {
1607                 tg3_flag_clear(tp, MDIOBUS_INITED);
1608                 mdiobus_unregister(tp->mdio_bus);
1609                 mdiobus_free(tp->mdio_bus);
1610         }
1611 }
1612
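/* Set the driver-event bit that tells the firmware on the RX CPU that
 * a new command has been posted in the NIC SRAM mailbox.
 */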
1613 /* tp->lock is held. */
1614 static inline void tg3_generate_fw_event(struct tg3 *tp)
1615 {
1616         u32 val;
1617
1618         val = tr32(GRC_RX_CPU_EVENT);
1619         val |= GRC_RX_CPU_DRIVER_EVENT;
1620         tw32_f(GRC_RX_CPU_EVENT, val);
1621
1622         tp->last_event_jiffies = jiffies;
1623 }
1624
1625 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1626
1627 /* tp->lock is held. */
1628 static void tg3_wait_for_event_ack(struct tg3 *tp)
1629 {
1630         int i;
1631         unsigned int delay_cnt;
1632         long time_remain;
1633
1634         /* If enough time has passed, no wait is necessary. */
1635         time_remain = (long)(tp->last_event_jiffies + 1 +
1636                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1637                       (long)jiffies;
1638         if (time_remain < 0)
1639                 return;
1640
1641         /* Check if we can shorten the wait time. */
1642         delay_cnt = jiffies_to_usecs(time_remain);
1643         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1644                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1645         delay_cnt = (delay_cnt >> 3) + 1;
1646
1647         for (i = 0; i < delay_cnt; i++) {
1648                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1649                         break;
1650                 if (pci_channel_offline(tp->pdev))
1651                         break;
1652
1653                 udelay(8);
1654         }
1655 }
1656
1657 /* tp->lock is held. */
1658 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1659 {
1660         u32 reg, val;
1661
1662         val = 0;
1663         if (!tg3_readphy(tp, MII_BMCR, &reg))
1664                 val = reg << 16;
1665         if (!tg3_readphy(tp, MII_BMSR, &reg))
1666                 val |= (reg & 0xffff);
1667         *data++ = val;
1668
1669         val = 0;
1670         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1671                 val = reg << 16;
1672         if (!tg3_readphy(tp, MII_LPA, &reg))
1673                 val |= (reg & 0xffff);
1674         *data++ = val;
1675
1676         val = 0;
1677         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1678                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1679                         val = reg << 16;
1680                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1681                         val |= (reg & 0xffff);
1682         }
1683         *data++ = val;
1684
1685         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1686                 val = reg << 16;
1687         else
1688                 val = 0;
1689         *data++ = val;
1690 }
1691
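/* Report the current link state (BMCR/BMSR, advertisement, partner
 * ability, 1000T status and PHY address) to the management firmware
 * through the NIC SRAM command mailbox.  5780-class with ASF only.
 */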
1692 /* tp->lock is held. */
1693 static void tg3_ump_link_report(struct tg3 *tp)
1694 {
1695         u32 data[4];
1696
1697         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1698                 return;
1699
1700         tg3_phy_gather_ump_data(tp, data);
1701
1702         tg3_wait_for_event_ack(tp);
1703
1704         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1705         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1706         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1707         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1708         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1709         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1710
1711         tg3_generate_fw_event(tp);
1712 }
1713
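/* Ask the ASF firmware to pause before the driver resets the chip.
 * Skipped when the APE manages the firmware instead.
 */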
1714 /* tp->lock is held. */
1715 static void tg3_stop_fw(struct tg3 *tp)
1716 {
1717         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1718                 /* Wait for RX cpu to ACK the previous event. */
1719                 tg3_wait_for_event_ack(tp);
1720
1721                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1722
1723                 tg3_generate_fw_event(tp);
1724
1725                 /* Wait for RX cpu to ACK this event. */
1726                 tg3_wait_for_event_ack(tp);
1727         }
1728 }
1729
1730 /* tp->lock is held. */
1731 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1732 {
1733         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1734                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1735
1736         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1737                 switch (kind) {
1738                 case RESET_KIND_INIT:
1739                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1740                                       DRV_STATE_START);
1741                         break;
1742
1743                 case RESET_KIND_SHUTDOWN:
1744                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1745                                       DRV_STATE_UNLOAD);
1746                         break;
1747
1748                 case RESET_KIND_SUSPEND:
1749                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1750                                       DRV_STATE_SUSPEND);
1751                         break;
1752
1753                 default:
1754                         break;
1755                 }
1756         }
1757 }
1758
1759 /* tp->lock is held. */
1760 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1761 {
1762         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1763                 switch (kind) {
1764                 case RESET_KIND_INIT:
1765                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1766                                       DRV_STATE_START_DONE);
1767                         break;
1768
1769                 case RESET_KIND_SHUTDOWN:
1770                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1771                                       DRV_STATE_UNLOAD_DONE);
1772                         break;
1773
1774                 default:
1775                         break;
1776                 }
1777         }
1778 }
1779
1780 /* tp->lock is held. */
1781 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1782 {
1783         if (tg3_flag(tp, ENABLE_ASF)) {
1784                 switch (kind) {
1785                 case RESET_KIND_INIT:
1786                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1787                                       DRV_STATE_START);
1788                         break;
1789
1790                 case RESET_KIND_SHUTDOWN:
1791                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1792                                       DRV_STATE_UNLOAD);
1793                         break;
1794
1795                 case RESET_KIND_SUSPEND:
1796                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1797                                       DRV_STATE_SUSPEND);
1798                         break;
1799
1800                 default:
1801                         break;
1802                 }
1803         }
1804 }
1805
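/* Wait for the bootcode to finish initializing.  Firmware signals
 * completion by writing the one's complement of the magic value back
 * into the firmware mailbox (INIT_DONE in VCPU_STATUS on the 5906).
 * The timeout is deliberately not an error: some boards ship with no
 * firmware at all.
 */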
1806 static int tg3_poll_fw(struct tg3 *tp)
1807 {
1808         int i;
1809         u32 val;
1810
1811         if (tg3_flag(tp, NO_FWARE_REPORTED))
1812                 return 0;
1813
1814         if (tg3_flag(tp, IS_SSB_CORE)) {
1815                 /* We don't use firmware. */
1816                 return 0;
1817         }
1818
1819         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1820                 /* Wait up to 20ms for init done. */
1821                 for (i = 0; i < 200; i++) {
1822                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1823                                 return 0;
1824                         if (pci_channel_offline(tp->pdev))
1825                                 return -ENODEV;
1826
1827                         udelay(100);
1828                 }
1829                 return -ENODEV;
1830         }
1831
1832         /* Wait for firmware initialization to complete. */
1833         for (i = 0; i < 100000; i++) {
1834                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1835                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1836                         break;
1837                 if (pci_channel_offline(tp->pdev)) {
1838                         if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1839                                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1840                                 netdev_info(tp->dev, "No firmware running\n");
1841                         }
1842
1843                         break;
1844                 }
1845
1846                 udelay(10);
1847         }
1848
1849         /* Chip might not be fitted with firmware.  Some Sun onboard
1850          * parts are configured like that.  So don't signal the timeout
1851          * of the above loop as an error, but do report the lack of
1852          * running firmware once.
1853          */
1854         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1855                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1856
1857                 netdev_info(tp->dev, "No firmware running\n");
1858         }
1859
1860         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1861                 /* The 57765 A0 needs a little more
1862                  * time to do some important work.
1863                  */
1864                 mdelay(10);
1865         }
1866
1867         return 0;
1868 }
1869
1870 static void tg3_link_report(struct tg3 *tp)
1871 {
1872         if (!netif_carrier_ok(tp->dev)) {
1873                 netif_info(tp, link, tp->dev, "Link is down\n");
1874                 tg3_ump_link_report(tp);
1875         } else if (netif_msg_link(tp)) {
1876                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1877                             (tp->link_config.active_speed == SPEED_1000 ?
1878                              1000 :
1879                              (tp->link_config.active_speed == SPEED_100 ?
1880                               100 : 10)),
1881                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1882                              "full" : "half"));
1883
1884                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1885                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1886                             "on" : "off",
1887                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1888                             "on" : "off");
1889
1890                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1891                         netdev_info(tp->dev, "EEE is %s\n",
1892                                     tp->setlpicnt ? "enabled" : "disabled");
1893
1894                 tg3_ump_link_report(tp);
1895         }
1896
1897         tp->link_up = netif_carrier_ok(tp->dev);
1898 }
1899
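/* Decode 1000BASE-T pause advertisement bits into FLOW_CTRL_RX/TX
 * following the symmetric/asymmetric pause rules of 802.3 Annex 28B.
 */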
1900 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1901 {
1902         u32 flowctrl = 0;
1903
1904         if (adv & ADVERTISE_PAUSE_CAP) {
1905                 flowctrl |= FLOW_CTRL_RX;
1906                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1907                         flowctrl |= FLOW_CTRL_TX;
1908         } else if (adv & ADVERTISE_PAUSE_ASYM)
1909                 flowctrl |= FLOW_CTRL_TX;
1910
1911         return flowctrl;
1912 }
1913
1914 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1915 {
1916         u16 miireg;
1917
1918         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1919                 miireg = ADVERTISE_1000XPAUSE;
1920         else if (flow_ctrl & FLOW_CTRL_TX)
1921                 miireg = ADVERTISE_1000XPSE_ASYM;
1922         else if (flow_ctrl & FLOW_CTRL_RX)
1923                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1924         else
1925                 miireg = 0;
1926
1927         return miireg;
1928 }
1929
1930 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1931 {
1932         u32 flowctrl = 0;
1933
1934         if (adv & ADVERTISE_1000XPAUSE) {
1935                 flowctrl |= FLOW_CTRL_RX;
1936                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1937                         flowctrl |= FLOW_CTRL_TX;
1938         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1939                 flowctrl |= FLOW_CTRL_TX;
1940
1941         return flowctrl;
1942 }
1943
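/* Resolve the negotiated 1000BASE-X pause configuration from the
 * local and link-partner advertisements (802.3 pause resolution).
 */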
1944 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1945 {
1946         u8 cap = 0;
1947
1948         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1949                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1950         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1951                 if (lcladv & ADVERTISE_1000XPAUSE)
1952                         cap = FLOW_CTRL_RX;
1953                 if (rmtadv & ADVERTISE_1000XPAUSE)
1954                         cap = FLOW_CTRL_TX;
1955         }
1956
1957         return cap;
1958 }
1959
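/* Apply the resolved (or forced) flow control settings to the MAC.
 * MAC_RX_MODE/MAC_TX_MODE are only rewritten when the enable bits
 * actually change.
 */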
1960 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1961 {
1962         u8 autoneg;
1963         u8 flowctrl = 0;
1964         u32 old_rx_mode = tp->rx_mode;
1965         u32 old_tx_mode = tp->tx_mode;
1966
1967         if (tg3_flag(tp, USE_PHYLIB))
1968                 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1969         else
1970                 autoneg = tp->link_config.autoneg;
1971
1972         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1973                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1974                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1975                 else
1976                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1977         } else
1978                 flowctrl = tp->link_config.flowctrl;
1979
1980         tp->link_config.active_flowctrl = flowctrl;
1981
1982         if (flowctrl & FLOW_CTRL_RX)
1983                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1984         else
1985                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1986
1987         if (old_rx_mode != tp->rx_mode)
1988                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1989
1990         if (flowctrl & FLOW_CTRL_TX)
1991                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1992         else
1993                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1994
1995         if (old_tx_mode != tp->tx_mode)
1996                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1997 }
1998
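/* phylib link-change callback: reprogram the MAC port mode, duplex,
 * flow control and TX slot time to match the PHY, then report the
 * link state if anything changed.
 */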
1999 static void tg3_adjust_link(struct net_device *dev)
2000 {
2001         u8 oldflowctrl, linkmesg = 0;
2002         u32 mac_mode, lcl_adv, rmt_adv;
2003         struct tg3 *tp = netdev_priv(dev);
2004         struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2005
2006         spin_lock_bh(&tp->lock);
2007
2008         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2009                                     MAC_MODE_HALF_DUPLEX);
2010
2011         oldflowctrl = tp->link_config.active_flowctrl;
2012
2013         if (phydev->link) {
2014                 lcl_adv = 0;
2015                 rmt_adv = 0;
2016
2017                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2018                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2019                 else if (phydev->speed == SPEED_1000 ||
2020                          tg3_asic_rev(tp) != ASIC_REV_5785)
2021                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
2022                 else
2023                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2024
2025                 if (phydev->duplex == DUPLEX_HALF)
2026                         mac_mode |= MAC_MODE_HALF_DUPLEX;
2027                 else {
2028                         lcl_adv = mii_advertise_flowctrl(
2029                                   tp->link_config.flowctrl);
2030
2031                         if (phydev->pause)
2032                                 rmt_adv = LPA_PAUSE_CAP;
2033                         if (phydev->asym_pause)
2034                                 rmt_adv |= LPA_PAUSE_ASYM;
2035                 }
2036
2037                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2038         } else
2039                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2040
2041         if (mac_mode != tp->mac_mode) {
2042                 tp->mac_mode = mac_mode;
2043                 tw32_f(MAC_MODE, tp->mac_mode);
2044                 udelay(40);
2045         }
2046
2047         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2048                 if (phydev->speed == SPEED_10)
2049                         tw32(MAC_MI_STAT,
2050                              MAC_MI_STAT_10MBPS_MODE |
2051                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2052                 else
2053                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2054         }
2055
2056         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2057                 tw32(MAC_TX_LENGTHS,
2058                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2059                       (6 << TX_LENGTHS_IPG_SHIFT) |
2060                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2061         else
2062                 tw32(MAC_TX_LENGTHS,
2063                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2064                       (6 << TX_LENGTHS_IPG_SHIFT) |
2065                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2066
2067         if (phydev->link != tp->old_link ||
2068             phydev->speed != tp->link_config.active_speed ||
2069             phydev->duplex != tp->link_config.active_duplex ||
2070             oldflowctrl != tp->link_config.active_flowctrl)
2071                 linkmesg = 1;
2072
2073         tp->old_link = phydev->link;
2074         tp->link_config.active_speed = phydev->speed;
2075         tp->link_config.active_duplex = phydev->duplex;
2076
2077         spin_unlock_bh(&tp->lock);
2078
2079         if (linkmesg)
2080                 tg3_link_report(tp);
2081 }
2082
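/* Reset the PHY and connect the MAC to it through phylib, masking the
 * advertised features down to what the MAC supports for the selected
 * interface mode.
 */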
2083 static int tg3_phy_init(struct tg3 *tp)
2084 {
2085         struct phy_device *phydev;
2086
2087         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2088                 return 0;
2089
2090         /* Bring the PHY back to a known state. */
2091         tg3_bmcr_reset(tp);
2092
2093         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2094
2095         /* Attach the MAC to the PHY. */
2096         phydev = phy_connect(tp->dev, phydev_name(phydev),
2097                              tg3_adjust_link, phydev->interface);
2098         if (IS_ERR(phydev)) {
2099                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2100                 return PTR_ERR(phydev);
2101         }
2102
2103         /* Mask with MAC supported features. */
2104         switch (phydev->interface) {
2105         case PHY_INTERFACE_MODE_GMII:
2106         case PHY_INTERFACE_MODE_RGMII:
2107                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2108                         phy_set_max_speed(phydev, SPEED_1000);
2109                         phy_support_asym_pause(phydev);
2110                         break;
2111                 }
2112                 fallthrough;
2113         case PHY_INTERFACE_MODE_MII:
2114                 phy_set_max_speed(phydev, SPEED_100);
2115                 phy_support_asym_pause(phydev);
2116                 break;
2117         default:
2118                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2119                 return -EINVAL;
2120         }
2121
2122         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2123
2124         phy_attached_info(phydev);
2125
2126         return 0;
2127 }
2128
2129 static void tg3_phy_start(struct tg3 *tp)
2130 {
2131         struct phy_device *phydev;
2132
2133         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2134                 return;
2135
2136         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2137
2138         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2139                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2140                 phydev->speed = tp->link_config.speed;
2141                 phydev->duplex = tp->link_config.duplex;
2142                 phydev->autoneg = tp->link_config.autoneg;
2143                 ethtool_convert_legacy_u32_to_link_mode(
2144                         phydev->advertising, tp->link_config.advertising);
2145         }
2146
2147         phy_start(phydev);
2148
2149         phy_start_aneg(phydev);
2150 }
2151
2152 static void tg3_phy_stop(struct tg3 *tp)
2153 {
2154         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2155                 return;
2156
2157         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2158 }
2159
2160 static void tg3_phy_fini(struct tg3 *tp)
2161 {
2162         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2163                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2164                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2165         }
2166 }
2167
2168 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2169 {
2170         int err;
2171         u32 val;
2172
2173         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2174                 return 0;
2175
2176         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2177                 /* Cannot do read-modify-write on 5401 */
2178                 err = tg3_phy_auxctl_write(tp,
2179                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2180                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2181                                            0x4c20);
2182                 goto done;
2183         }
2184
2185         err = tg3_phy_auxctl_read(tp,
2186                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2187         if (err)
2188                 return err;
2189
2190         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2191         err = tg3_phy_auxctl_write(tp,
2192                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2193
2194 done:
2195         return err;
2196 }
2197
2198 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2199 {
2200         u32 phytest;
2201
2202         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2203                 u32 phy;
2204
2205                 tg3_writephy(tp, MII_TG3_FET_TEST,
2206                              phytest | MII_TG3_FET_SHADOW_EN);
2207                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2208                         if (enable)
2209                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2210                         else
2211                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2212                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2213                 }
2214                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2215         }
2216 }
2217
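/* Enable or disable auto power-down in the PHY.  FET-style PHYs keep
 * the control in a shadow register; other PHYs are programmed through
 * the misc-shadow SCR5 and APD selectors (the APD wake timer is set
 * to 84 ms when enabled, going by the register definition).
 */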
2218 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2219 {
2220         u32 reg;
2221
2222         if (!tg3_flag(tp, 5705_PLUS) ||
2223             (tg3_flag(tp, 5717_PLUS) &&
2224              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2225                 return;
2226
2227         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2228                 tg3_phy_fet_toggle_apd(tp, enable);
2229                 return;
2230         }
2231
2232         reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2233               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2234               MII_TG3_MISC_SHDW_SCR5_SDTL |
2235               MII_TG3_MISC_SHDW_SCR5_C125OE;
2236         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2237                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2238
2239         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2240
2241
2242         reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2243         if (enable)
2244                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2245
2246         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2247 }
2248
2249 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2250 {
2251         u32 phy;
2252
2253         if (!tg3_flag(tp, 5705_PLUS) ||
2254             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2255                 return;
2256
2257         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2258                 u32 ephy;
2259
2260                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2261                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2262
2263                         tg3_writephy(tp, MII_TG3_FET_TEST,
2264                                      ephy | MII_TG3_FET_SHADOW_EN);
2265                         if (!tg3_readphy(tp, reg, &phy)) {
2266                                 if (enable)
2267                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2268                                 else
2269                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2270                                 tg3_writephy(tp, reg, phy);
2271                         }
2272                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2273                 }
2274         } else {
2275                 int ret;
2276
2277                 ret = tg3_phy_auxctl_read(tp,
2278                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2279                 if (!ret) {
2280                         if (enable)
2281                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2282                         else
2283                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2284                         tg3_phy_auxctl_write(tp,
2285                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2286                 }
2287         }
2288 }
2289
2290 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2291 {
2292         int ret;
2293         u32 val;
2294
2295         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2296                 return;
2297
2298         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2299         if (!ret)
2300                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2301                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2302 }
2303
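/* Transfer factory calibration from the chip's OTP word into the PHY
 * DSP registers (AGC target, filter, clock and offset adjustments).
 */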
2304 static void tg3_phy_apply_otp(struct tg3 *tp)
2305 {
2306         u32 otp, phy;
2307
2308         if (!tp->phy_otp)
2309                 return;
2310
2311         otp = tp->phy_otp;
2312
2313         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2314                 return;
2315
2316         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2317         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2318         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2319
2320         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2321               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2322         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2323
2324         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2325         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2326         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2327
2328         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2329         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2330
2331         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2332         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2333
2334         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2335               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2336         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2337
2338         tg3_phy_toggle_auxctl_smdsp(tp, false);
2339 }
2340
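/* Snapshot the chip's current EEE state (activity, advertisements,
 * LPI enable bits and LPI timer) into @eee, or into tp->eee when
 * @eee is NULL.
 */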
2341 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2342 {
2343         u32 val;
2344         struct ethtool_eee *dest = &tp->eee;
2345
2346         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2347                 return;
2348
2349         if (eee)
2350                 dest = eee;
2351
2352         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2353                 return;
2354
2355         /* Pull eee_active */
2356         if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2357             val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2358                 dest->eee_active = 1;
2359         } else
2360                 dest->eee_active = 0;
2361
2362         /* Pull lp advertised settings */
2363         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2364                 return;
2365         dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2366
2367         /* Pull advertised and eee_enabled settings */
2368         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2369                 return;
2370         dest->eee_enabled = !!val;
2371         dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2372
2373         /* Pull tx_lpi_enabled */
2374         val = tr32(TG3_CPMU_EEE_MODE);
2375         dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2376
2377         /* Pull lpi timer value */
2378         dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2379 }
2380
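/* Re-evaluate EEE after a link change: program the LPI exit timing
 * for the negotiated speed and arm tp->setlpicnt so LPI can be turned
 * on once the link is stable, or disable LPI entirely otherwise.
 */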
2381 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2382 {
2383         u32 val;
2384
2385         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2386                 return;
2387
2388         tp->setlpicnt = 0;
2389
2390         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2391             current_link_up &&
2392             tp->link_config.active_duplex == DUPLEX_FULL &&
2393             (tp->link_config.active_speed == SPEED_100 ||
2394              tp->link_config.active_speed == SPEED_1000)) {
2395                 u32 eeectl;
2396
2397                 if (tp->link_config.active_speed == SPEED_1000)
2398                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2399                 else
2400                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2401
2402                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2403
2404                 tg3_eee_pull_config(tp, NULL);
2405                 if (tp->eee.eee_active)
2406                         tp->setlpicnt = 2;
2407         }
2408
2409         if (!tp->setlpicnt) {
2410                 if (current_link_up &&
2411                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2412                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2413                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2414                 }
2415
2416                 val = tr32(TG3_CPMU_EEE_MODE);
2417                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2418         }
2419 }
2420
2421 static void tg3_phy_eee_enable(struct tg3 *tp)
2422 {
2423         u32 val;
2424
2425         if (tp->link_config.active_speed == SPEED_1000 &&
2426             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2427              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2428              tg3_flag(tp, 57765_CLASS)) &&
2429             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2430                 val = MII_TG3_DSP_TAP26_ALNOKO |
2431                       MII_TG3_DSP_TAP26_RMRXSTO;
2432                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2433                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2434         }
2435
2436         val = tr32(TG3_CPMU_EEE_MODE);
2437         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2438 }
2439
2440 static int tg3_wait_macro_done(struct tg3 *tp)
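/* Poll the DSP control register until the macro-busy bit (0x1000)
 * clears, giving up after 100 reads.
 */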
2441 {
2442         int limit = 100;
2443
2444         while (limit--) {
2445                 u32 tmp32;
2446
2447                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2448                         if ((tmp32 & 0x1000) == 0)
2449                                 break;
2450                 }
2451         }
2452         if (limit < 0)
2453                 return -EBUSY;
2454
2455         return 0;
2456 }
2457
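/* Write a test pattern into each of the four DSP channels and verify
 * the read-back.  A macro timeout asks the caller (via *resetp) to
 * reset the PHY before retrying; a data mismatch fails with -EBUSY
 * after poking the recovery values into the DSP.
 */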
2458 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2459 {
2460         static const u32 test_pat[4][6] = {
2461         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2462         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2463         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2464         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2465         };
2466         int chan;
2467
2468         for (chan = 0; chan < 4; chan++) {
2469                 int i;
2470
2471                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2472                              (chan * 0x2000) | 0x0200);
2473                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2474
2475                 for (i = 0; i < 6; i++)
2476                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2477                                      test_pat[chan][i]);
2478
2479                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2480                 if (tg3_wait_macro_done(tp)) {
2481                         *resetp = 1;
2482                         return -EBUSY;
2483                 }
2484
2485                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2486                              (chan * 0x2000) | 0x0200);
2487                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2488                 if (tg3_wait_macro_done(tp)) {
2489                         *resetp = 1;
2490                         return -EBUSY;
2491                 }
2492
2493                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2494                 if (tg3_wait_macro_done(tp)) {
2495                         *resetp = 1;
2496                         return -EBUSY;
2497                 }
2498
2499                 for (i = 0; i < 6; i += 2) {
2500                         u32 low, high;
2501
2502                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2503                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2504                             tg3_wait_macro_done(tp)) {
2505                                 *resetp = 1;
2506                                 return -EBUSY;
2507                         }
2508                         low &= 0x7fff;
2509                         high &= 0x000f;
2510                         if (low != test_pat[chan][i] ||
2511                             high != test_pat[chan][i+1]) {
2512                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2513                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2514                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2515
2516                                 return -EBUSY;
2517                         }
2518                 }
2519         }
2520
2521         return 0;
2522 }
2523
2524 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2525 {
2526         int chan;
2527
2528         for (chan = 0; chan < 4; chan++) {
2529                 int i;
2530
2531                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2532                              (chan * 0x2000) | 0x0200);
2533                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2534                 for (i = 0; i < 6; i++)
2535                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2536                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2537                 if (tg3_wait_macro_done(tp))
2538                         return -EBUSY;
2539         }
2540
2541         return 0;
2542 }
2543
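/* PHY reset workaround for the 5703/5704/5705: force 1000 Mbps
 * full-duplex master mode, run the DSP test-pattern check (retrying
 * with a fresh BMCR reset up to 10 times), then restore the original
 * PHY settings.
 */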
2544 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2545 {
2546         u32 reg32, phy9_orig;
2547         int retries, do_phy_reset, err;
2548
2549         retries = 10;
2550         do_phy_reset = 1;
2551         do {
2552                 if (do_phy_reset) {
2553                         err = tg3_bmcr_reset(tp);
2554                         if (err)
2555                                 return err;
2556                         do_phy_reset = 0;
2557                 }
2558
2559                 /* Disable transmitter and interrupt.  */
2560                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2561                         continue;
2562
2563                 reg32 |= 0x3000;
2564                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2565
2566                 /* Set full-duplex, 1000 Mbps. */
2567                 tg3_writephy(tp, MII_BMCR,
2568                              BMCR_FULLDPLX | BMCR_SPEED1000);
2569
2570                 /* Set to master mode.  */
2571                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2572                         continue;
2573
2574                 tg3_writephy(tp, MII_CTRL1000,
2575                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2576
2577                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2578                 if (err)
2579                         return err;
2580
2581                 /* Block the PHY control access.  */
2582                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2583
2584                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2585                 if (!err)
2586                         break;
2587         } while (--retries);
2588
2589         err = tg3_phy_reset_chanpat(tp);
2590         if (err)
2591                 return err;
2592
2593         tg3_phydsp_write(tp, 0x8005, 0x0000);
2594
2595         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2596         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2597
2598         tg3_phy_toggle_auxctl_smdsp(tp, false);
2599
2600         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2601
2602         err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2603         if (err)
2604                 return err;
2605
2606         reg32 &= ~0x3000;
2607         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2608
2609         return 0;
2610 }
2611
2612 static void tg3_carrier_off(struct tg3 *tp)
2613 {
2614         netif_carrier_off(tp->dev);
2615         tp->link_up = false;
2616 }
2617
2618 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2619 {
2620         if (tg3_flag(tp, ENABLE_ASF))
2621                 netdev_warn(tp->dev,
2622                             "Management side-band traffic will be interrupted during phy settings change\n");
2623 }
2624
2625 /* Reset the tigon3 PHY and reapply the chip- and PHY-specific
2626  * workarounds that a reset wipes out.
2627  */
2628 static int tg3_phy_reset(struct tg3 *tp)
2629 {
2630         u32 val, cpmuctrl;
2631         int err;
2632
2633         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2634                 val = tr32(GRC_MISC_CFG);
2635                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2636                 udelay(40);
2637         }
2638         err  = tg3_readphy(tp, MII_BMSR, &val);
2639         err |= tg3_readphy(tp, MII_BMSR, &val);
2640         if (err != 0)
2641                 return -EBUSY;
2642
2643         if (netif_running(tp->dev) && tp->link_up) {
2644                 netif_carrier_off(tp->dev);
2645                 tg3_link_report(tp);
2646         }
2647
2648         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2649             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2650             tg3_asic_rev(tp) == ASIC_REV_5705) {
2651                 err = tg3_phy_reset_5703_4_5(tp);
2652                 if (err)
2653                         return err;
2654                 goto out;
2655         }
2656
2657         cpmuctrl = 0;
2658         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2659             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2660                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2661                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2662                         tw32(TG3_CPMU_CTRL,
2663                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2664         }
2665
2666         err = tg3_bmcr_reset(tp);
2667         if (err)
2668                 return err;
2669
2670         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2671                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2672                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2673
2674                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2675         }
2676
2677         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2678             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2679                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2680                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2681                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2682                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2683                         udelay(40);
2684                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2685                 }
2686         }
2687
2688         if (tg3_flag(tp, 5717_PLUS) &&
2689             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2690                 return 0;
2691
2692         tg3_phy_apply_otp(tp);
2693
2694         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2695                 tg3_phy_toggle_apd(tp, true);
2696         else
2697                 tg3_phy_toggle_apd(tp, false);
2698
2699 out:
2700         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2701             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2702                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2703                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2704                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2705         }
2706
2707         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2708                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2709                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2710         }
2711
2712         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2713                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2714                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2715                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2716                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2717                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2718                 }
2719         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2720                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2721                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2722                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2723                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2724                                 tg3_writephy(tp, MII_TG3_TEST1,
2725                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2726                         } else
2727                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2728
2729                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2730                 }
2731         }
2732
2733         /* Set the extended packet length bit (bit 14) on all
2734          * chips that support jumbo frames. */
2735         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2736                 /* Cannot do read-modify-write on 5401 */
2737                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2738         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2739                 /* Set bit 14 with read-modify-write to preserve other bits */
2740                 err = tg3_phy_auxctl_read(tp,
2741                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2742                 if (!err)
2743                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2744                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2745         }
2746
2747         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2748          * jumbo frame transmission.
2749          */
2750         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2751                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2752                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2753                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2754         }
2755
2756         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2757                 /* adjust output voltage */
2758                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2759         }
2760
2761         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2762                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2763
2764         tg3_phy_toggle_automdix(tp, true);
2765         tg3_phy_set_wirespeed(tp);
2766         return 0;
2767 }
2768
2769 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2770 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2771 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2772                                           TG3_GPIO_MSG_NEED_VAUX)
2773 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2774         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2775          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2776          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2777          (TG3_GPIO_MSG_DRVR_PRES << 12))
2778
2779 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2780         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2781          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2782          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2783          (TG3_GPIO_MSG_NEED_VAUX << 12))
2784
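/* Record this PCI function's power-management status (driver present
 * / VAUX needed) in the status word shared by all functions and
 * return the combined status.  Each function owns a 4-bit slot in
 * the word, as reflected by the ALL_* masks above.
 */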
2785 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2786 {
2787         u32 status, shift;
2788
2789         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2790             tg3_asic_rev(tp) == ASIC_REV_5719)
2791                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2792         else
2793                 status = tr32(TG3_CPMU_DRV_STATUS);
2794
2795         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2796         status &= ~(TG3_GPIO_MSG_MASK << shift);
2797         status |= (newstat << shift);
2798
2799         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2800             tg3_asic_rev(tp) == ASIC_REV_5719)
2801                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2802         else
2803                 tw32(TG3_CPMU_DRV_STATUS, status);
2804
2805         return status >> TG3_APE_GPIO_MSG_SHIFT;
2806 }
2807
2808 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2809 {
2810         if (!tg3_flag(tp, IS_NIC))
2811                 return 0;
2812
2813         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2814             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2815             tg3_asic_rev(tp) == ASIC_REV_5720) {
2816                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2817                         return -EIO;
2818
2819                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2820
2821                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2822                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2823
2824                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2825         } else {
2826                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2827                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2828         }
2829
2830         return 0;
2831 }
2832
2833 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2834 {
2835         u32 grc_local_ctrl;
2836
2837         if (!tg3_flag(tp, IS_NIC) ||
2838             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2839             tg3_asic_rev(tp) == ASIC_REV_5701)
2840                 return;
2841
2842         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2843
2844         tw32_wait_f(GRC_LOCAL_CTRL,
2845                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2846                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2847
2848         tw32_wait_f(GRC_LOCAL_CTRL,
2849                     grc_local_ctrl,
2850                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2851
2852         tw32_wait_f(GRC_LOCAL_CTRL,
2853                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2854                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2855 }
2856
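/* Walk the GPIO sequence that switches the board to auxiliary power,
 * honoring the per-chip quirks: 5700/5701 strapping, the GPIO 0/2
 * swap on non-e 5761 parts, and boards where GPIO2 is unusable.
 */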
2857 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2858 {
2859         if (!tg3_flag(tp, IS_NIC))
2860                 return;
2861
2862         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2863             tg3_asic_rev(tp) == ASIC_REV_5701) {
2864                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2865                             (GRC_LCLCTRL_GPIO_OE0 |
2866                              GRC_LCLCTRL_GPIO_OE1 |
2867                              GRC_LCLCTRL_GPIO_OE2 |
2868                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2869                              GRC_LCLCTRL_GPIO_OUTPUT1),
2870                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2871         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2872                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2873                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2874                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2875                                      GRC_LCLCTRL_GPIO_OE1 |
2876                                      GRC_LCLCTRL_GPIO_OE2 |
2877                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2878                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2879                                      tp->grc_local_ctrl;
2880                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2881                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2882
2883                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2884                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2885                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2886
2887                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2888                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2889                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2890         } else {
2891                 u32 no_gpio2;
2892                 u32 grc_local_ctrl = 0;
2893
2894                 /* Workaround to prevent overdrawing Amps. */
2895                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2896                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2897                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2898                                     grc_local_ctrl,
2899                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2900                 }
2901
2902                 /* On 5753 and variants, GPIO2 cannot be used. */
2903                 no_gpio2 = tp->nic_sram_data_cfg &
2904                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2905
2906                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2907                                   GRC_LCLCTRL_GPIO_OE1 |
2908                                   GRC_LCLCTRL_GPIO_OE2 |
2909                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2910                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2911                 if (no_gpio2) {
2912                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2913                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2914                 }
2915                 tw32_wait_f(GRC_LOCAL_CTRL,
2916                             tp->grc_local_ctrl | grc_local_ctrl,
2917                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2918
2919                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2920
2921                 tw32_wait_f(GRC_LOCAL_CTRL,
2922                             tp->grc_local_ctrl | grc_local_ctrl,
2923                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2924
2925                 if (!no_gpio2) {
2926                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2927                         tw32_wait_f(GRC_LOCAL_CTRL,
2928                                     tp->grc_local_ctrl | grc_local_ctrl,
2929                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2930                 }
2931         }
2932 }
2933
2934 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2935 {
2936         u32 msg = 0;
2937
2938         /* Serialize power state transitions */
2939         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2940                 return;
2941
2942         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2943                 msg = TG3_GPIO_MSG_NEED_VAUX;
2944
2945         msg = tg3_set_function_status(tp, msg);
2946
2947         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2948                 goto done;
2949
2950         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2951                 tg3_pwrsrc_switch_to_vaux(tp);
2952         else
2953                 tg3_pwrsrc_die_with_vmain(tp);
2954
2955 done:
2956         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2957 }
2958
2959 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2960 {
2961         bool need_vaux = false;
2962
2963         /* The GPIOs do something completely different on 57765. */
2964         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2965                 return;
2966
2967         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2968             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2969             tg3_asic_rev(tp) == ASIC_REV_5720) {
2970                 tg3_frob_aux_power_5717(tp, include_wol ?
2971                                         tg3_flag(tp, WOL_ENABLE) != 0 : false);
2972                 return;
2973         }
2974
2975         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2976                 struct net_device *dev_peer;
2977
2978                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2979
2980                 /* remove_one() may have been run on the peer. */
2981                 if (dev_peer) {
2982                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2983
2984                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2985                                 return;
2986
2987                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2988                             tg3_flag(tp_peer, ENABLE_ASF))
2989                                 need_vaux = true;
2990                 }
2991         }
2992
2993         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2994             tg3_flag(tp, ENABLE_ASF))
2995                 need_vaux = true;
2996
2997         if (need_vaux)
2998                 tg3_pwrsrc_switch_to_vaux(tp);
2999         else
3000                 tg3_pwrsrc_die_with_vmain(tp);
3001 }
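
/*
 * Decision summary for the two functions above (an illustrative note,
 * not additional driver code): a port votes for Vaux when WOL is armed
 * (and include_wol allows counting it) or when ASF management is
 * enabled.  If the peer port has already completed initialization it
 * owns the power source and nothing is touched; otherwise the combined
 * votes select tg3_pwrsrc_switch_to_vaux() or
 * tg3_pwrsrc_die_with_vmain().  On 5717/5719/5720 the same vote is
 * serialized through the APE GPIO lock in tg3_frob_aux_power_5717().
 */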
3002
3003 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3004 {
3005         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3006                 return 1;
3007         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3008                 if (speed != SPEED_10)
3009                         return 1;
3010         } else if (speed == SPEED_10)
3011                 return 1;
3012
3013         return 0;
3014 }
3015
3016 static bool tg3_phy_power_bug(struct tg3 *tp)
3017 {
3018         switch (tg3_asic_rev(tp)) {
3019         case ASIC_REV_5700:
3020         case ASIC_REV_5704:
3021                 return true;
3022         case ASIC_REV_5780:
3023                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3024                         return true;
3025                 return false;
3026         case ASIC_REV_5717:
3027                 if (!tp->pci_fn)
3028                         return true;
3029                 return false;
3030         case ASIC_REV_5719:
3031         case ASIC_REV_5720:
3032                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3033                     !tp->pci_fn)
3034                         return true;
3035                 return false;
3036         }
3037
3038         return false;
3039 }
3040
3041 static bool tg3_phy_led_bug(struct tg3 *tp)
3042 {
3043         switch (tg3_asic_rev(tp)) {
3044         case ASIC_REV_5719:
3045         case ASIC_REV_5720:
3046                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3047                     !tp->pci_fn)
3048                         return true;
3049                 return false;
3050         }
3051
3052         return false;
3053 }
3054
3055 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3056 {
3057         u32 val;
3058
3059         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3060                 return;
3061
3062         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3063                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3064                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3065                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3066
3067                         sg_dig_ctrl |=
3068                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3069                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3070                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3071                 }
3072                 return;
3073         }
3074
3075         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3076                 tg3_bmcr_reset(tp);
3077                 val = tr32(GRC_MISC_CFG);
3078                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3079                 udelay(40);
3080                 return;
3081         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3082                 u32 phytest;
3083                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3084                         u32 phy;
3085
3086                         tg3_writephy(tp, MII_ADVERTISE, 0);
3087                         tg3_writephy(tp, MII_BMCR,
3088                                      BMCR_ANENABLE | BMCR_ANRESTART);
3089
3090                         tg3_writephy(tp, MII_TG3_FET_TEST,
3091                                      phytest | MII_TG3_FET_SHADOW_EN);
3092                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3093                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3094                                 tg3_writephy(tp,
3095                                              MII_TG3_FET_SHDW_AUXMODE4,
3096                                              phy);
3097                         }
3098                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3099                 }
3100                 return;
3101         } else if (do_low_power) {
3102                 if (!tg3_phy_led_bug(tp))
3103                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3104                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3105
3106                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3107                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3108                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3109                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3110         }
3111
3112         /* On some chips the PHY must not be powered down because of
3113          * hardware bugs; tg3_phy_power_bug() identifies them.
3114          */
3115         if (tg3_phy_power_bug(tp))
3116                 return;
3117
3118         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3119             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3120                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3121                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3122                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3123                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3124         }
3125
3126         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3127 }
3128
3129 /* tp->lock is held. */
3130 static int tg3_nvram_lock(struct tg3 *tp)
3131 {
3132         if (tg3_flag(tp, NVRAM)) {
3133                 int i;
3134
3135                 if (tp->nvram_lock_cnt == 0) {
3136                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3137                         for (i = 0; i < 8000; i++) {
3138                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3139                                         break;
3140                                 udelay(20);
3141                         }
3142                         if (i == 8000) {
3143                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3144                                 return -ENODEV;
3145                         }
3146                 }
3147                 tp->nvram_lock_cnt++;
3148         }
3149         return 0;
3150 }
3151
3152 /* tp->lock is held. */
3153 static void tg3_nvram_unlock(struct tg3 *tp)
3154 {
3155         if (tg3_flag(tp, NVRAM)) {
3156                 if (tp->nvram_lock_cnt > 0)
3157                         tp->nvram_lock_cnt--;
3158                 if (tp->nvram_lock_cnt == 0)
3159                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3160         }
3161 }
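
/*
 * Usage sketch for the pair above (illustrative, not driver code): the
 * SWARB hardware arbitration is reference counted via nvram_lock_cnt,
 * so lock/unlock pairs may nest while tp->lock is held:
 *
 *	if (!tg3_nvram_lock(tp)) {
 *		tg3_enable_nvram_access(tp);
 *		... touch NVRAM registers ...
 *		tg3_disable_nvram_access(tp);
 *		tg3_nvram_unlock(tp);
 *	}
 */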
3162
3163 /* tp->lock is held. */
3164 static void tg3_enable_nvram_access(struct tg3 *tp)
3165 {
3166         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3167                 u32 nvaccess = tr32(NVRAM_ACCESS);
3168
3169                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3170         }
3171 }
3172
3173 /* tp->lock is held. */
3174 static void tg3_disable_nvram_access(struct tg3 *tp)
3175 {
3176         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3177                 u32 nvaccess = tr32(NVRAM_ACCESS);
3178
3179                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3180         }
3181 }
3182
3183 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3184                                         u32 offset, u32 *val)
3185 {
3186         u32 tmp;
3187         int i;
3188
3189         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3190                 return -EINVAL;
3191
3192         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3193                                         EEPROM_ADDR_DEVID_MASK |
3194                                         EEPROM_ADDR_READ);
3195         tw32(GRC_EEPROM_ADDR,
3196              tmp |
3197              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3198              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3199               EEPROM_ADDR_ADDR_MASK) |
3200              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3201
3202         for (i = 0; i < 1000; i++) {
3203                 tmp = tr32(GRC_EEPROM_ADDR);
3204
3205                 if (tmp & EEPROM_ADDR_COMPLETE)
3206                         break;
3207                 msleep(1);
3208         }
3209         if (!(tmp & EEPROM_ADDR_COMPLETE))
3210                 return -EBUSY;
3211
3212         tmp = tr32(GRC_EEPROM_DATA);
3213
3214         /*
3215          * The data will always be opposite the native endian
3216          * format.  Perform a blind byteswap to compensate.
3217          */
3218         *val = swab32(tmp);
3219
3220         return 0;
3221 }
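
/*
 * Note on the blind byteswap above: swab32() simply reverses byte
 * order, e.g. swab32(0x12345678) == 0x78563412.  Because the SEEPROM
 * data always arrives opposite the host's native endianness, one
 * unconditional swap gives the same *val on both LE and BE hosts.
 */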
3222
3223 #define NVRAM_CMD_TIMEOUT 10000
3224
3225 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3226 {
3227         int i;
3228
3229         tw32(NVRAM_CMD, nvram_cmd);
3230         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3231                 usleep_range(10, 40);
3232                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3233                         udelay(10);
3234                         break;
3235                 }
3236         }
3237
3238         if (i == NVRAM_CMD_TIMEOUT)
3239                 return -EBUSY;
3240
3241         return 0;
3242 }
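
/*
 * Timing sketch for the poll loop above: up to NVRAM_CMD_TIMEOUT
 * (10000) iterations of usleep_range(10, 40) bound the wait to roughly
 * 100-400 ms, plus scheduling slack, before -EBUSY is returned.
 */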
3243
3244 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3245 {
3246         if (tg3_flag(tp, NVRAM) &&
3247             tg3_flag(tp, NVRAM_BUFFERED) &&
3248             tg3_flag(tp, FLASH) &&
3249             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3250             (tp->nvram_jedecnum == JEDEC_ATMEL))
3251
3252                 addr = ((addr / tp->nvram_pagesize) <<
3253                         ATMEL_AT45DB0X1B_PAGE_POS) +
3254                        (addr % tp->nvram_pagesize);
3255
3256         return addr;
3257 }
3258
3259 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3260 {
3261         if (tg3_flag(tp, NVRAM) &&
3262             tg3_flag(tp, NVRAM_BUFFERED) &&
3263             tg3_flag(tp, FLASH) &&
3264             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3265             (tp->nvram_jedecnum == JEDEC_ATMEL))
3266
3267                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3268                         tp->nvram_pagesize) +
3269                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3270
3271         return addr;
3272 }
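
/*
 * Worked example for the two translations above (a sketch assuming
 * 264-byte pages and a page-position shift of 9, as used for the
 * AT45DB0x1B parts): linear address 1000 maps to page 1000 / 264 == 3
 * at offset 1000 % 264 == 208, so the physical address is
 * (3 << 9) + 208 == 1744.  The logical translation inverts this:
 * (1744 >> 9) * 264 + (1744 & 511) == 1000.
 */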
3273
3274 /* NOTE: Data read in from NVRAM is byteswapped according to
3275  * the byteswapping settings for all other register accesses.
3276  * tg3 devices are BE devices, so on a BE machine, the data
3277  * returned will be exactly as it is seen in NVRAM.  On a LE
3278  * machine, the 32-bit value will be byteswapped.
3279  */
3280 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3281 {
3282         int ret;
3283
3284         if (!tg3_flag(tp, NVRAM))
3285                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3286
3287         offset = tg3_nvram_phys_addr(tp, offset);
3288
3289         if (offset > NVRAM_ADDR_MSK)
3290                 return -EINVAL;
3291
3292         ret = tg3_nvram_lock(tp);
3293         if (ret)
3294                 return ret;
3295
3296         tg3_enable_nvram_access(tp);
3297
3298         tw32(NVRAM_ADDR, offset);
3299         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3300                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3301
3302         if (ret == 0)
3303                 *val = tr32(NVRAM_RDDATA);
3304
3305         tg3_disable_nvram_access(tp);
3306
3307         tg3_nvram_unlock(tp);
3308
3309         return ret;
3310 }
3311
3312 /* Ensures NVRAM data is in bytestream format. */
3313 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3314 {
3315         u32 v;
3316         int res = tg3_nvram_read(tp, offset, &v);
3317         if (!res)
3318                 *val = cpu_to_be32(v);
3319         return res;
3320 }
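
/*
 * Usage sketch (illustrative; offset and count are hypothetical):
 * tg3_nvram_read_be32() is the helper to use when NVRAM contents are
 * treated as a byte stream, e.g. for checksumming or copying out:
 *
 *	__be32 buf[4];
 *	int i, err = 0;
 *
 *	for (i = 0; i < 4 && !err; i++)
 *		err = tg3_nvram_read_be32(tp, off + i * 4, &buf[i]);
 *
 * After this, the bytes of buf[] are identical on LE and BE hosts.
 */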
3321
3322 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3323                                     u32 offset, u32 len, u8 *buf)
3324 {
3325         int i, j, rc = 0;
3326         u32 val;
3327
3328         for (i = 0; i < len; i += 4) {
3329                 u32 addr;
3330                 __be32 data;
3331
3332                 addr = offset + i;
3333
3334                 memcpy(&data, buf + i, 4);
3335
3336                 /*
3337                  * The SEEPROM interface expects the data to always be opposite
3338                  * the native endian format.  We accomplish this by reversing
3339                  * all the operations that would have been performed on the
3340                  * data from a call to tg3_nvram_read_be32().
3341                  */
3342                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3343
3344                 val = tr32(GRC_EEPROM_ADDR);
3345                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3346
3347                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3348                         EEPROM_ADDR_READ);
3349                 tw32(GRC_EEPROM_ADDR, val |
3350                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3351                         (addr & EEPROM_ADDR_ADDR_MASK) |
3352                         EEPROM_ADDR_START |
3353                         EEPROM_ADDR_WRITE);
3354
3355                 for (j = 0; j < 1000; j++) {
3356                         val = tr32(GRC_EEPROM_ADDR);
3357
3358                         if (val & EEPROM_ADDR_COMPLETE)
3359                                 break;
3360                         msleep(1);
3361                 }
3362                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3363                         rc = -EBUSY;
3364                         break;
3365                 }
3366         }
3367
3368         return rc;
3369 }
3370
3371 /* offset and length are dword aligned */
3372 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3373                 u8 *buf)
3374 {
3375         int ret = 0;
3376         u32 pagesize = tp->nvram_pagesize;
3377         u32 pagemask = pagesize - 1;
3378         u32 nvram_cmd;
3379         u8 *tmp;
3380
3381         tmp = kmalloc(pagesize, GFP_KERNEL);
3382         if (tmp == NULL)
3383                 return -ENOMEM;
3384
3385         while (len) {
3386                 int j;
3387                 u32 phy_addr, page_off, size;
3388
3389                 phy_addr = offset & ~pagemask;
3390
3391                 for (j = 0; j < pagesize; j += 4) {
3392                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3393                                                   (__be32 *) (tmp + j));
3394                         if (ret)
3395                                 break;
3396                 }
3397                 if (ret)
3398                         break;
3399
3400                 page_off = offset & pagemask;
3401                 size = pagesize;
3402                 if (len < size)
3403                         size = len;
3404
3405                 len -= size;
3406
3407                 memcpy(tmp + page_off, buf, size);
3408
3409                 offset = offset + (pagesize - page_off);
3410
3411                 tg3_enable_nvram_access(tp);
3412
3413                 /*
3414                  * Before we can erase the flash page, we need
3415                  * to issue a special "write enable" command.
3416                  */
3417                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3418
3419                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3420                         break;
3421
3422                 /* Erase the target page */
3423                 tw32(NVRAM_ADDR, phy_addr);
3424
3425                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3426                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3427
3428                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3429                         break;
3430
3431                 /* Issue another write enable to start the write. */
3432                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3433
3434                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3435                         break;
3436
3437                 for (j = 0; j < pagesize; j += 4) {
3438                         __be32 data;
3439
3440                         data = *((__be32 *) (tmp + j));
3441
3442                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3443
3444                         tw32(NVRAM_ADDR, phy_addr + j);
3445
3446                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3447                                 NVRAM_CMD_WR;
3448
3449                         if (j == 0)
3450                                 nvram_cmd |= NVRAM_CMD_FIRST;
3451                         else if (j == (pagesize - 4))
3452                                 nvram_cmd |= NVRAM_CMD_LAST;
3453
3454                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3455                         if (ret)
3456                                 break;
3457                 }
3458                 if (ret)
3459                         break;
3460         }
3461
3462         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3463         tg3_nvram_exec_cmd(tp, nvram_cmd);
3464
3465         kfree(tmp);
3466
3467         return ret;
3468 }
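
/*
 * Shape of the unbuffered write above (an illustrative summary): for
 * each flash page touched by [offset, offset + len):
 *
 *   1. read the whole page into tmp via tg3_nvram_read_be32();
 *   2. merge the caller's bytes at page_off;
 *   3. issue WREN, erase the page, then issue WREN again;
 *   4. stream the page back one word at a time, tagging the first
 *      word NVRAM_CMD_FIRST and the final word NVRAM_CMD_LAST;
 *
 * and finally issue WRDI to drop write enable.
 */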
3469
3470 /* offset and length are dword aligned */
3471 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3472                 u8 *buf)
3473 {
3474         int i, ret = 0;
3475
3476         for (i = 0; i < len; i += 4, offset += 4) {
3477                 u32 page_off, phy_addr, nvram_cmd;
3478                 __be32 data;
3479
3480                 memcpy(&data, buf + i, 4);
3481                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3482
3483                 page_off = offset % tp->nvram_pagesize;
3484
3485                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3486
3487                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3488
3489                 if (page_off == 0 || i == 0)
3490                         nvram_cmd |= NVRAM_CMD_FIRST;
3491                 if (page_off == (tp->nvram_pagesize - 4))
3492                         nvram_cmd |= NVRAM_CMD_LAST;
3493
3494                 if (i == (len - 4))
3495                         nvram_cmd |= NVRAM_CMD_LAST;
3496
3497                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3498                     !tg3_flag(tp, FLASH) ||
3499                     !tg3_flag(tp, 57765_PLUS))
3500                         tw32(NVRAM_ADDR, phy_addr);
3501
3502                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3503                     !tg3_flag(tp, 5755_PLUS) &&
3504                     (tp->nvram_jedecnum == JEDEC_ST) &&
3505                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3506                         u32 cmd;
3507
3508                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3509                         ret = tg3_nvram_exec_cmd(tp, cmd);
3510                         if (ret)
3511                                 break;
3512                 }
3513                 if (!tg3_flag(tp, FLASH)) {
3514                         /* We always do complete word writes to the EEPROM. */
3515                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3516                 }
3517
3518                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3519                 if (ret)
3520                         break;
3521         }
3522         return ret;
3523 }
3524
3525 /* offset and length are dword aligned */
3526 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3527 {
3528         int ret;
3529
3530         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3531                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3532                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3533                 udelay(40);
3534         }
3535
3536         if (!tg3_flag(tp, NVRAM)) {
3537                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3538         } else {
3539                 u32 grc_mode;
3540
3541                 ret = tg3_nvram_lock(tp);
3542                 if (ret)
3543                         return ret;
3544
3545                 tg3_enable_nvram_access(tp);
3546                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3547                         tw32(NVRAM_WRITE1, 0x406);
3548
3549                 grc_mode = tr32(GRC_MODE);
3550                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3551
3552                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3553                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3554                                 buf);
3555                 } else {
3556                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3557                                 buf);
3558                 }
3559
3560                 grc_mode = tr32(GRC_MODE);
3561                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3562
3563                 tg3_disable_nvram_access(tp);
3564                 tg3_nvram_unlock(tp);
3565         }
3566
3567         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3568                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3569                 udelay(40);
3570         }
3571
3572         return ret;
3573 }
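
/*
 * Call sketch (illustrative; the offset and buffer are hypothetical):
 * write back a dword-aligned blob, letting tg3_nvram_write_block()
 * handle write protect, locking and the buffered/unbuffered split:
 *
 *	u8 blob[256];	// filled earlier; length is a multiple of 4
 *
 *	if (tg3_nvram_write_block(tp, 0x100, sizeof(blob), blob))
 *		netdev_warn(tp->dev, "NVRAM write failed\n");
 */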
3574
3575 #define RX_CPU_SCRATCH_BASE     0x30000
3576 #define RX_CPU_SCRATCH_SIZE     0x04000
3577 #define TX_CPU_SCRATCH_BASE     0x34000
3578 #define TX_CPU_SCRATCH_SIZE     0x04000
3579
3580 /* tp->lock is held. */
3581 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3582 {
3583         int i;
3584         const int iters = 10000;
3585
3586         for (i = 0; i < iters; i++) {
3587                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3588                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3589                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3590                         break;
3591                 if (pci_channel_offline(tp->pdev))
3592                         return -EBUSY;
3593         }
3594
3595         return (i == iters) ? -EBUSY : 0;
3596 }
3597
3598 /* tp->lock is held. */
3599 static int tg3_rxcpu_pause(struct tg3 *tp)
3600 {
3601         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3602
3603         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3604         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3605         udelay(10);
3606
3607         return rc;
3608 }
3609
3610 /* tp->lock is held. */
3611 static int tg3_txcpu_pause(struct tg3 *tp)
3612 {
3613         return tg3_pause_cpu(tp, TX_CPU_BASE);
3614 }
3615
3616 /* tp->lock is held. */
3617 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3618 {
3619         tw32(cpu_base + CPU_STATE, 0xffffffff);
3620         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3621 }
3622
3623 /* tp->lock is held. */
3624 static void tg3_rxcpu_resume(struct tg3 *tp)
3625 {
3626         tg3_resume_cpu(tp, RX_CPU_BASE);
3627 }
3628
3629 /* tp->lock is held. */
3630 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3631 {
3632         int rc;
3633
3634         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3635
3636         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3637                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3638
3639                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3640                 return 0;
3641         }
3642         if (cpu_base == RX_CPU_BASE) {
3643                 rc = tg3_rxcpu_pause(tp);
3644         } else {
3645                 /*
3646                  * There is only an Rx CPU for the 5750 derivative in the
3647                  * BCM4785.
3648                  */
3649                 if (tg3_flag(tp, IS_SSB_CORE))
3650                         return 0;
3651
3652                 rc = tg3_txcpu_pause(tp);
3653         }
3654
3655         if (rc) {
3656                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3657                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3658                 return -ENODEV;
3659         }
3660
3661         /* Clear firmware's nvram arbitration. */
3662         if (tg3_flag(tp, NVRAM))
3663                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3664         return 0;
3665 }
3666
3667 static int tg3_fw_data_len(struct tg3 *tp,
3668                            const struct tg3_firmware_hdr *fw_hdr)
3669 {
3670         int fw_len;
3671
3672         /* Non-fragmented firmware has one firmware header followed by a
3673          * contiguous chunk of data to be written. The length field in that
3674          * header is not the length of the data to be written but the complete
3675          * length of the BSS. The data length is therefore determined from
3676          * tp->fw->size minus headers.
3677          *
3678          * Fragmented firmware has a main header followed by multiple
3679          * fragments. Each fragment is identical to non-fragmented firmware:
3680          * a firmware header followed by a contiguous chunk of data. In
3681          * the main header, the length field is unused and set to 0xffffffff.
3682          * In each fragment header the length is the entire size of that
3683          * fragment, i.e. fragment data plus header length. The data length is
3684          * therefore the length field in the header minus TG3_FW_HDR_LEN.
3685          */
3686         if (tp->fw_len == 0xffffffff)
3687                 fw_len = be32_to_cpu(fw_hdr->len);
3688         else
3689                 fw_len = tp->fw->size;
3690
3691         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3692 }
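
/*
 * Worked example (hypothetical sizes, assuming TG3_FW_HDR_LEN == 12,
 * i.e. three u32 fields: version, base_addr, len): a non-fragmented
 * image with tp->fw->size == 0x1000 yields (0x1000 - 12) / 4 == 1021
 * data words regardless of the header's len field, while a fragment
 * whose header says len == 0x20c carries (0x20c - 12) / 4 == 128 words.
 */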
3693
3694 /* tp->lock is held. */
3695 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3696                                  u32 cpu_scratch_base, int cpu_scratch_size,
3697                                  const struct tg3_firmware_hdr *fw_hdr)
3698 {
3699         int err, i;
3700         void (*write_op)(struct tg3 *, u32, u32);
3701         int total_len = tp->fw->size;
3702
3703         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3704                 netdev_err(tp->dev,
3705                            "%s: attempt to load TX cpu firmware on a 5705 or newer chip\n",
3706                            __func__);
3707                 return -EINVAL;
3708         }
3709
3710         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3711                 write_op = tg3_write_mem;
3712         else
3713                 write_op = tg3_write_indirect_reg32;
3714
3715         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3716                 /* It is possible that bootcode is still loading at this point.
3717                  * Get the nvram lock first before halting the cpu.
3718                  */
3719                 int lock_err = tg3_nvram_lock(tp);
3720                 err = tg3_halt_cpu(tp, cpu_base);
3721                 if (!lock_err)
3722                         tg3_nvram_unlock(tp);
3723                 if (err)
3724                         goto out;
3725
3726                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3727                         write_op(tp, cpu_scratch_base + i, 0);
3728                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3729                 tw32(cpu_base + CPU_MODE,
3730                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3731         } else {
3732                 /* Subtract additional main header for fragmented firmware and
3733                  * advance to the first fragment
3734                  */
3735                 total_len -= TG3_FW_HDR_LEN;
3736                 fw_hdr++;
3737         }
3738
3739         do {
3740                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3741                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3742                         write_op(tp, cpu_scratch_base +
3743                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3744                                      (i * sizeof(u32)),
3745                                  be32_to_cpu(fw_data[i]));
3746
3747                 total_len -= be32_to_cpu(fw_hdr->len);
3748
3749                 /* Advance to next fragment */
3750                 fw_hdr = (struct tg3_firmware_hdr *)
3751                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3752         } while (total_len > 0);
3753
3754         err = 0;
3755
3756 out:
3757         return err;
3758 }
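
/*
 * Fragmented image layout consumed by the loop above (sketch):
 *
 *	+--------------+  main header, len == 0xffffffff (57766 path)
 *	+--------------+  fragment 1 header: base_addr, len1
 *	|   data ...   |  (len1 - TG3_FW_HDR_LEN bytes)
 *	+--------------+  fragment 2 header: base_addr, len2
 *	|   data ...   |
 *	+--------------+
 *
 * total_len is decremented by each fragment's len until it hits zero.
 */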
3759
3760 /* tp->lock is held. */
3761 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3762 {
3763         int i;
3764         const int iters = 5;
3765
3766         tw32(cpu_base + CPU_STATE, 0xffffffff);
3767         tw32_f(cpu_base + CPU_PC, pc);
3768
3769         for (i = 0; i < iters; i++) {
3770                 if (tr32(cpu_base + CPU_PC) == pc)
3771                         break;
3772                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3773                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3774                 tw32_f(cpu_base + CPU_PC, pc);
3775                 udelay(1000);
3776         }
3777
3778         return (i == iters) ? -EBUSY : 0;
3779 }
3780
3781 /* tp->lock is held. */
3782 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3783 {
3784         const struct tg3_firmware_hdr *fw_hdr;
3785         int err;
3786
3787         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3788
3789         /* The firmware blob starts with version numbers, followed by
3790          * start address and length; length is the complete length here:
3791          * length = end_address_of_bss - start_address_of_text. The
3792          * remainder is the blob to be loaded contiguously from the
3793          * start address. */
3794
3795         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3796                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3797                                     fw_hdr);
3798         if (err)
3799                 return err;
3800
3801         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3802                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3803                                     fw_hdr);
3804         if (err)
3805                 return err;
3806
3807         /* Now startup only the RX cpu. */
3808         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3809                                        be32_to_cpu(fw_hdr->base_addr));
3810         if (err) {
3811                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3812                            "should be %08x\n", __func__,
3813                            tr32(RX_CPU_BASE + CPU_PC),
3814                            be32_to_cpu(fw_hdr->base_addr));
3815                 return -ENODEV;
3816         }
3817
3818         tg3_rxcpu_resume(tp);
3819
3820         return 0;
3821 }
3822
3823 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3824 {
3825         const int iters = 1000;
3826         int i;
3827         u32 val;
3828
3829         /* Wait for boot code to complete initialization and enter service
3830          * loop. It is then safe to download service patches.
3831          */
3832         for (i = 0; i < iters; i++) {
3833                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3834                         break;
3835
3836                 udelay(10);
3837         }
3838
3839         if (i == iters) {
3840                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3841                 return -EBUSY;
3842         }
3843
3844         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3845         if (val & 0xff) {
3846                 netdev_warn(tp->dev,
3847                             "Other patches exist. Not downloading EEE patch\n");
3848                 return -EEXIST;
3849         }
3850
3851         return 0;
3852 }
3853
3854 /* tp->lock is held. */
3855 static void tg3_load_57766_firmware(struct tg3 *tp)
3856 {
3857         struct tg3_firmware_hdr *fw_hdr;
3858
3859         if (!tg3_flag(tp, NO_NVRAM))
3860                 return;
3861
3862         if (tg3_validate_rxcpu_state(tp))
3863                 return;
3864
3865         if (!tp->fw)
3866                 return;
3867
3868         /* This firmware blob has a different format from older firmware
3869          * releases, as described below. The main difference is that the
3870          * data is fragmented, to be written to non-contiguous locations.
3871          *
3872          * It begins with a firmware header identical to other firmware,
3873          * consisting of version, base addr and length. The length here is
3874          * unused and set to 0xffffffff.
3875          *
3876          * This is followed by a series of firmware fragments, each
3877          * individually identical to previous firmware, i.e. a firmware
3878          * header followed by the data for that fragment. The version
3879          * field of the individual fragment headers is unused.
3880          */
3881
3882         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3883         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3884                 return;
3885
3886         if (tg3_rxcpu_pause(tp))
3887                 return;
3888
3889         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3890         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3891
3892         tg3_rxcpu_resume(tp);
3893 }
3894
3895 /* tp->lock is held. */
3896 static int tg3_load_tso_firmware(struct tg3 *tp)
3897 {
3898         const struct tg3_firmware_hdr *fw_hdr;
3899         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3900         int err;
3901
3902         if (!tg3_flag(tp, FW_TSO))
3903                 return 0;
3904
3905         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3906
3907         /* The firmware blob starts with version numbers, followed by
3908          * start address and length; length is the complete length here:
3909          * length = end_address_of_bss - start_address_of_text. The
3910          * remainder is the blob to be loaded contiguously from the
3911          * start address. */
3912
3913         cpu_scratch_size = tp->fw_len;
3914
3915         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3916                 cpu_base = RX_CPU_BASE;
3917                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3918         } else {
3919                 cpu_base = TX_CPU_BASE;
3920                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3921                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3922         }
3923
3924         err = tg3_load_firmware_cpu(tp, cpu_base,
3925                                     cpu_scratch_base, cpu_scratch_size,
3926                                     fw_hdr);
3927         if (err)
3928                 return err;
3929
3930         /* Now startup the cpu. */
3931         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3932                                        be32_to_cpu(fw_hdr->base_addr));
3933         if (err) {
3934                 netdev_err(tp->dev,
3935                            "%s fails to set CPU PC, is %08x should be %08x\n",
3936                            __func__, tr32(cpu_base + CPU_PC),
3937                            be32_to_cpu(fw_hdr->base_addr));
3938                 return -ENODEV;
3939         }
3940
3941         tg3_resume_cpu(tp, cpu_base);
3942         return 0;
3943 }
3944
3945 /* tp->lock is held. */
3946 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3947                                    int index)
3948 {
3949         u32 addr_high, addr_low;
3950
3951         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3952         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3953                     (mac_addr[4] <<  8) | mac_addr[5]);
3954
3955         if (index < 4) {
3956                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3957                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3958         } else {
3959                 index -= 4;
3960                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3961                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3962         }
3963 }
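
/*
 * Worked example (hypothetical address): for 00:11:22:33:44:55 the
 * function writes addr_high == 0x0011 and addr_low == 0x22334455.
 * Indices 0-3 land in MAC_ADDR_n_{HIGH,LOW}, indices 4-15 in
 * MAC_EXTADDR_n_{HIGH,LOW}.
 */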
3964
3965 /* tp->lock is held. */
3966 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3967 {
3968         u32 addr_high;
3969         int i;
3970
3971         for (i = 0; i < 4; i++) {
3972                 if (i == 1 && skip_mac_1)
3973                         continue;
3974                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3975         }
3976
3977         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3978             tg3_asic_rev(tp) == ASIC_REV_5704) {
3979                 for (i = 4; i < 16; i++)
3980                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3981         }
3982
3983         addr_high = (tp->dev->dev_addr[0] +
3984                      tp->dev->dev_addr[1] +
3985                      tp->dev->dev_addr[2] +
3986                      tp->dev->dev_addr[3] +
3987                      tp->dev->dev_addr[4] +
3988                      tp->dev->dev_addr[5]) &
3989                 TX_BACKOFF_SEED_MASK;
3990         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3991 }
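
/*
 * The backoff seed above is just the byte sum of the MAC address masked
 * with TX_BACKOFF_SEED_MASK; for the hypothetical 00:11:22:33:44:55 the
 * unmasked sum is 0x00 + 0x11 + 0x22 + 0x33 + 0x44 + 0x55 == 0xff.
 */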
3992
3993 static void tg3_enable_register_access(struct tg3 *tp)
3994 {
3995         /*
3996          * Make sure register accesses (indirect or otherwise) will function
3997          * correctly.
3998          */
3999         pci_write_config_dword(tp->pdev,
4000                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4001 }
4002
4003 static int tg3_power_up(struct tg3 *tp)
4004 {
4005         int err;
4006
4007         tg3_enable_register_access(tp);
4008
4009         err = pci_set_power_state(tp->pdev, PCI_D0);
4010         if (!err) {
4011                 /* Switch out of Vaux if it is a NIC */
4012                 tg3_pwrsrc_switch_to_vmain(tp);
4013         } else {
4014                 netdev_err(tp->dev, "Transition to D0 failed\n");
4015         }
4016
4017         return err;
4018 }
4019
4020 static int tg3_setup_phy(struct tg3 *, bool);
4021
4022 static int tg3_power_down_prepare(struct tg3 *tp)
4023 {
4024         u32 misc_host_ctrl;
4025         bool device_should_wake, do_low_power;
4026
4027         tg3_enable_register_access(tp);
4028
4029         /* Restore the CLKREQ setting. */
4030         if (tg3_flag(tp, CLKREQ_BUG))
4031                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4032                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4033
4034         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4035         tw32(TG3PCI_MISC_HOST_CTRL,
4036              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4037
4038         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4039                              tg3_flag(tp, WOL_ENABLE);
4040
4041         if (tg3_flag(tp, USE_PHYLIB)) {
4042                 do_low_power = false;
4043                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4044                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4045                         __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4046                         struct phy_device *phydev;
4047                         u32 phyid;
4048
4049                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4050
4051                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4052
4053                         tp->link_config.speed = phydev->speed;
4054                         tp->link_config.duplex = phydev->duplex;
4055                         tp->link_config.autoneg = phydev->autoneg;
4056                         ethtool_convert_link_mode_to_legacy_u32(
4057                                 &tp->link_config.advertising,
4058                                 phydev->advertising);
4059
4060                         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4061                         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4062                                          advertising);
4063                         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4064                                          advertising);
4065                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4066                                          advertising);
4067
4068                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4069                                 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4070                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4071                                                          advertising);
4072                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4073                                                          advertising);
4074                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4075                                                          advertising);
4076                                 } else {
4077                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4078                                                          advertising);
4079                                 }
4080                         }
4081
4082                         linkmode_copy(phydev->advertising, advertising);
4083                         phy_start_aneg(phydev);
4084
4085                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4086                         if (phyid != PHY_ID_BCMAC131) {
4087                                 phyid &= PHY_BCM_OUI_MASK;
4088                                 if (phyid == PHY_BCM_OUI_1 ||
4089                                     phyid == PHY_BCM_OUI_2 ||
4090                                     phyid == PHY_BCM_OUI_3)
4091                                         do_low_power = true;
4092                         }
4093                 }
4094         } else {
4095                 do_low_power = true;
4096
4097                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4098                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4099
4100                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4101                         tg3_setup_phy(tp, false);
4102         }
4103
4104         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4105                 u32 val;
4106
4107                 val = tr32(GRC_VCPU_EXT_CTRL);
4108                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4109         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4110                 int i;
4111                 u32 val;
4112
4113                 for (i = 0; i < 200; i++) {
4114                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4115                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4116                                 break;
4117                         msleep(1);
4118                 }
4119         }
4120         if (tg3_flag(tp, WOL_CAP))
4121                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4122                                                      WOL_DRV_STATE_SHUTDOWN |
4123                                                      WOL_DRV_WOL |
4124                                                      WOL_SET_MAGIC_PKT);
4125
4126         if (device_should_wake) {
4127                 u32 mac_mode;
4128
4129                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4130                         if (do_low_power &&
4131                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4132                                 tg3_phy_auxctl_write(tp,
4133                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4134                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4135                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4136                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4137                                 udelay(40);
4138                         }
4139
4140                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4141                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4142                         else if (tp->phy_flags &
4143                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4144                                 if (tp->link_config.active_speed == SPEED_1000)
4145                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4146                                 else
4147                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4148                         } else
4149                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4150
4151                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4152                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4153                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4154                                              SPEED_100 : SPEED_10;
4155                                 if (tg3_5700_link_polarity(tp, speed))
4156                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4157                                 else
4158                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4159                         }
4160                 } else {
4161                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4162                 }
4163
4164                 if (!tg3_flag(tp, 5750_PLUS))
4165                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4166
4167                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4168                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4169                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4170                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4171
4172                 if (tg3_flag(tp, ENABLE_APE))
4173                         mac_mode |= MAC_MODE_APE_TX_EN |
4174                                     MAC_MODE_APE_RX_EN |
4175                                     MAC_MODE_TDE_ENABLE;
4176
4177                 tw32_f(MAC_MODE, mac_mode);
4178                 udelay(100);
4179
4180                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4181                 udelay(10);
4182         }
4183
4184         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4185             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4186              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4187                 u32 base_val;
4188
4189                 base_val = tp->pci_clock_ctrl;
4190                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4191                              CLOCK_CTRL_TXCLK_DISABLE);
4192
4193                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4194                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4195         } else if (tg3_flag(tp, 5780_CLASS) ||
4196                    tg3_flag(tp, CPMU_PRESENT) ||
4197                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4198                 /* do nothing */
4199         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4200                 u32 newbits1, newbits2;
4201
4202                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4203                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4204                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4205                                     CLOCK_CTRL_TXCLK_DISABLE |
4206                                     CLOCK_CTRL_ALTCLK);
4207                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4208                 } else if (tg3_flag(tp, 5705_PLUS)) {
4209                         newbits1 = CLOCK_CTRL_625_CORE;
4210                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4211                 } else {
4212                         newbits1 = CLOCK_CTRL_ALTCLK;
4213                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4214                 }
4215
4216                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4217                             40);
4218
4219                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4220                             40);
4221
4222                 if (!tg3_flag(tp, 5705_PLUS)) {
4223                         u32 newbits3;
4224
4225                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4226                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4227                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4228                                             CLOCK_CTRL_TXCLK_DISABLE |
4229                                             CLOCK_CTRL_44MHZ_CORE);
4230                         } else {
4231                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4232                         }
4233
4234                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4235                                     tp->pci_clock_ctrl | newbits3, 40);
4236                 }
4237         }
4238
4239         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4240                 tg3_power_down_phy(tp, do_low_power);
4241
4242         tg3_frob_aux_power(tp, true);
4243
4244         /* Workaround for unstable PLL clock */
4245         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4246             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4247              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4248                 u32 val = tr32(0x7d00);
4249
4250                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4251                 tw32(0x7d00, val);
4252                 if (!tg3_flag(tp, ENABLE_ASF)) {
4253                         int err;
4254
4255                         err = tg3_nvram_lock(tp);
4256                         tg3_halt_cpu(tp, RX_CPU_BASE);
4257                         if (!err)
4258                                 tg3_nvram_unlock(tp);
4259                 }
4260         }
4261
4262         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4263
4264         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4265
4266         return 0;
4267 }
4268
4269 static void tg3_power_down(struct tg3 *tp)
4270 {
4271         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4272         pci_set_power_state(tp->pdev, PCI_D3hot);
4273 }
4274
4275 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4276 {
4277         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4278         case MII_TG3_AUX_STAT_10HALF:
4279                 *speed = SPEED_10;
4280                 *duplex = DUPLEX_HALF;
4281                 break;
4282
4283         case MII_TG3_AUX_STAT_10FULL:
4284                 *speed = SPEED_10;
4285                 *duplex = DUPLEX_FULL;
4286                 break;
4287
4288         case MII_TG3_AUX_STAT_100HALF:
4289                 *speed = SPEED_100;
4290                 *duplex = DUPLEX_HALF;
4291                 break;
4292
4293         case MII_TG3_AUX_STAT_100FULL:
4294                 *speed = SPEED_100;
4295                 *duplex = DUPLEX_FULL;
4296                 break;
4297
4298         case MII_TG3_AUX_STAT_1000HALF:
4299                 *speed = SPEED_1000;
4300                 *duplex = DUPLEX_HALF;
4301                 break;
4302
4303         case MII_TG3_AUX_STAT_1000FULL:
4304                 *speed = SPEED_1000;
4305                 *duplex = DUPLEX_FULL;
4306                 break;
4307
4308         default:
4309                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4310                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4311                                  SPEED_10;
4312                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4313                                   DUPLEX_HALF;
4314                         break;
4315                 }
4316                 *speed = SPEED_UNKNOWN;
4317                 *duplex = DUPLEX_UNKNOWN;
4318                 break;
4319         }
4320 }
4321
4322 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4323 {
4324         int err = 0;
4325         u32 val, new_adv;
4326
4327         new_adv = ADVERTISE_CSMA;
4328         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4329         new_adv |= mii_advertise_flowctrl(flowctrl);
4330
4331         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4332         if (err)
4333                 goto done;
4334
4335         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4336                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4337
4338                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4339                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4340                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4341
4342                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4343                 if (err)
4344                         goto done;
4345         }
4346
4347         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4348                 goto done;
4349
4350         tw32(TG3_CPMU_EEE_MODE,
4351              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4352
4353         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4354         if (!err) {
4355                 u32 err2;
4356
4357                 val = 0;
4358                 /* Advertise 100BASE-TX EEE ability */
4359                 if (advertise & ADVERTISED_100baseT_Full)
4360                         val |= MDIO_AN_EEE_ADV_100TX;
4361                 /* Advertise 1000BASE-T EEE ability */
4362                 if (advertise & ADVERTISED_1000baseT_Full)
4363                         val |= MDIO_AN_EEE_ADV_1000T;
4364
4365                 if (!tp->eee.eee_enabled) {
4366                         val = 0;
4367                         tp->eee.advertised = 0;
4368                 } else {
4369                         tp->eee.advertised = advertise &
4370                                              (ADVERTISED_100baseT_Full |
4371                                               ADVERTISED_1000baseT_Full);
4372                 }
4373
4374                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4375                 if (err)
4376                         val = 0;
4377
4378                 switch (tg3_asic_rev(tp)) {
4379                 case ASIC_REV_5717:
4380                 case ASIC_REV_57765:
4381                 case ASIC_REV_57766:
4382                 case ASIC_REV_5719:
4383                         /* If we advertised any EEE modes above... */
4384                         if (val)
4385                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4386                                       MII_TG3_DSP_TAP26_RMRXSTO |
4387                                       MII_TG3_DSP_TAP26_OPCSINPT;
4388                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4389                         fallthrough;
4390                 case ASIC_REV_5720:
4391                 case ASIC_REV_5762:
4392                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4393                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4394                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4395                 }
4396
4397                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4398                 if (!err)
4399                         err = err2;
4400         }
4401
4402 done:
4403         return err;
4404 }
4405
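/* (Re)start link bring-up on a copper PHY.  With autoneg enabled (or in
 * low-power states) this programs the advertisement registers via
 * tg3_phy_autoneg_cfg() and restarts autonegotiation; the low-power
 * "keep link on powerdown" case deliberately leaves BMCR untouched to
 * avoid a link flap.  With autoneg disabled, the configured speed and
 * duplex are forced in BMCR, using a brief loopback write to take the
 * old link down first.
 */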
4406 static void tg3_phy_copper_begin(struct tg3 *tp)
4407 {
4408         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4409             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4410                 u32 adv, fc;
4411
4412                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4413                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4414                         adv = ADVERTISED_10baseT_Half |
4415                               ADVERTISED_10baseT_Full;
4416                         if (tg3_flag(tp, WOL_SPEED_100MB))
4417                                 adv |= ADVERTISED_100baseT_Half |
4418                                        ADVERTISED_100baseT_Full;
4419                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4420                                 if (!(tp->phy_flags &
4421                                       TG3_PHYFLG_DISABLE_1G_HD_ADV))
4422                                         adv |= ADVERTISED_1000baseT_Half;
4423                                 adv |= ADVERTISED_1000baseT_Full;
4424                         }
4425
4426                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4427                 } else {
4428                         adv = tp->link_config.advertising;
4429                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4430                                 adv &= ~(ADVERTISED_1000baseT_Half |
4431                                          ADVERTISED_1000baseT_Full);
4432
4433                         fc = tp->link_config.flowctrl;
4434                 }
4435
4436                 tg3_phy_autoneg_cfg(tp, adv, fc);
4437
4438                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4439                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4440                         /* Normally during power down we want to autonegotiate
4441                          * the lowest possible speed for WOL. However, to avoid
4442                          * a link flap, we leave it untouched.
4443                          */
4444                         return;
4445                 }
4446
4447                 tg3_writephy(tp, MII_BMCR,
4448                              BMCR_ANENABLE | BMCR_ANRESTART);
4449         } else {
4450                 int i;
4451                 u32 bmcr, orig_bmcr;
4452
4453                 tp->link_config.active_speed = tp->link_config.speed;
4454                 tp->link_config.active_duplex = tp->link_config.duplex;
4455
4456                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4457                         /* With autoneg disabled, the 5714/5715 family only
4458                          * links up when the advertisement register has the
4459                          * configured speed enabled.
4460                          */
4461                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4462                 }
4463
4464                 bmcr = 0;
4465                 switch (tp->link_config.speed) {
4466                 default:
4467                 case SPEED_10:
4468                         break;
4469
4470                 case SPEED_100:
4471                         bmcr |= BMCR_SPEED100;
4472                         break;
4473
4474                 case SPEED_1000:
4475                         bmcr |= BMCR_SPEED1000;
4476                         break;
4477                 }
4478
4479                 if (tp->link_config.duplex == DUPLEX_FULL)
4480                         bmcr |= BMCR_FULLDPLX;
4481
4482                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4483                     (bmcr != orig_bmcr)) {
4484                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4485                         for (i = 0; i < 1500; i++) {
4486                                 u32 tmp;
4487
4488                                 udelay(10);
4489                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4490                                     tg3_readphy(tp, MII_BMSR, &tmp))
4491                                         continue;
4492                                 if (!(tmp & BMSR_LSTATUS)) {
4493                                         udelay(40);
4494                                         break;
4495                                 }
4496                         }
4497                         tg3_writephy(tp, MII_BMCR, bmcr);
4498                         udelay(40);
4499                 }
4500         }
4501 }
4502
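/* Reconstruct tp->link_config (autoneg mode, forced speed/duplex,
 * advertised modes and flow control) from whatever is currently
 * programmed in the PHY, e.g. settings left behind by boot firmware.
 * Returns -EIO for forced-speed combinations that make no sense for
 * this PHY type.
 */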
4503 static int tg3_phy_pull_config(struct tg3 *tp)
4504 {
4505         int err;
4506         u32 val;
4507
4508         err = tg3_readphy(tp, MII_BMCR, &val);
4509         if (err)
4510                 goto done;
4511
4512         if (!(val & BMCR_ANENABLE)) {
4513                 tp->link_config.autoneg = AUTONEG_DISABLE;
4514                 tp->link_config.advertising = 0;
4515                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4516
4517                 err = -EIO;
4518
4519                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4520                 case 0:
4521                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4522                                 goto done;
4523
4524                         tp->link_config.speed = SPEED_10;
4525                         break;
4526                 case BMCR_SPEED100:
4527                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4528                                 goto done;
4529
4530                         tp->link_config.speed = SPEED_100;
4531                         break;
4532                 case BMCR_SPEED1000:
4533                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4534                                 tp->link_config.speed = SPEED_1000;
4535                                 break;
4536                         }
4537                         fallthrough;
4538                 default:
4539                         goto done;
4540                 }
4541
4542                 if (val & BMCR_FULLDPLX)
4543                         tp->link_config.duplex = DUPLEX_FULL;
4544                 else
4545                         tp->link_config.duplex = DUPLEX_HALF;
4546
4547                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4548
4549                 err = 0;
4550                 goto done;
4551         }
4552
4553         tp->link_config.autoneg = AUTONEG_ENABLE;
4554         tp->link_config.advertising = ADVERTISED_Autoneg;
4555         tg3_flag_set(tp, PAUSE_AUTONEG);
4556
4557         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4558                 u32 adv;
4559
4560                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4561                 if (err)
4562                         goto done;
4563
4564                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4565                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4566
4567                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4568         } else {
4569                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4570         }
4571
4572         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4573                 u32 adv;
4574
4575                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4576                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4577                         if (err)
4578                                 goto done;
4579
4580                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4581                 } else {
4582                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4583                         if (err)
4584                                 goto done;
4585
4586                         adv = tg3_decode_flowctrl_1000X(val);
4587                         tp->link_config.flowctrl = adv;
4588
4589                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4590                         adv = mii_adv_to_ethtool_adv_x(val);
4591                 }
4592
4593                 tp->link_config.advertising |= adv;
4594         }
4595
4596 done:
4597         return err;
4598 }
4599
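/* Load the DSP fix-up sequence for the BCM5401 PHY.  The register and
 * value pairs below are opaque, vendor-provided magic; the individual
 * write errors are OR-ed together so any single failure is reported.
 */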
4600 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4601 {
4602         int err;
4603
4604         /* Turn off tap power management and set the extended
4605          * packet length bit.  */
4606         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4607
4608         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4609         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4610         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4611         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4612         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4613
4614         udelay(40);
4615
4616         return err;
4617 }
4618
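/* Return true if the EEE configuration in the PHY matches what the
 * driver wants: the advertised modes and LPI timer/enable settings
 * must agree, and a disabled EEE state must not be advertising
 * anything.  Non-EEE-capable PHYs trivially pass.
 */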
4619 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4620 {
4621         struct ethtool_eee eee;
4622
4623         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4624                 return true;
4625
4626         tg3_eee_pull_config(tp, &eee);
4627
4628         if (tp->eee.eee_enabled) {
4629                 if (tp->eee.advertised != eee.advertised ||
4630                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4631                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4632                         return false;
4633         } else {
4634                 /* EEE is disabled but we're advertising */
4635                 if (eee.advertised)
4636                         return false;
4637         }
4638
4639         return true;
4640 }
4641
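/* Verify that the PHY's advertisement registers (MII_ADVERTISE and,
 * on gigabit-capable PHYs, MII_CTRL1000) still match the requested
 * advertisement, folding in the 5701 A0/B0 master-mode workaround
 * bits.  *lcladv is filled with the raw MII_ADVERTISE value so the
 * caller can resolve flow control from it.
 */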
4642 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4643 {
4644         u32 advmsk, tgtadv, advertising;
4645
4646         advertising = tp->link_config.advertising;
4647         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4648
4649         advmsk = ADVERTISE_ALL;
4650         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4651                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4652                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4653         }
4654
4655         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4656                 return false;
4657
4658         if ((*lcladv & advmsk) != tgtadv)
4659                 return false;
4660
4661         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4662                 u32 tg3_ctrl;
4663
4664                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4665
4666                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4667                         return false;
4668
4669                 if (tgtadv &&
4670                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4671                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4672                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4673                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4674                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4675                 } else {
4676                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4677                 }
4678
4679                 if (tg3_ctrl != tgtadv)
4680                         return false;
4681         }
4682
4683         return true;
4684 }
4685
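/* Read the link partner's abilities from MII_STAT1000 and MII_LPA,
 * cache the ethtool-encoded result in tp->link_config.rmt_adv, and
 * return the raw MII_LPA value via *rmtadv.  Returns false on a PHY
 * read error.
 */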
4686 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4687 {
4688         u32 lpeth = 0;
4689
4690         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4691                 u32 val;
4692
4693                 if (tg3_readphy(tp, MII_STAT1000, &val))
4694                         return false;
4695
4696                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4697         }
4698
4699         if (tg3_readphy(tp, MII_LPA, rmtadv))
4700                 return false;
4701
4702         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4703         tp->link_config.rmt_adv = lpeth;
4704
4705         return true;
4706 }
4707
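/* Propagate a link state change to the networking core: toggle the
 * carrier, drop the parallel-detect flag when an MII serdes link goes
 * down, and log the new state.  Returns true only if the link state
 * actually changed.
 */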
4708 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4709 {
4710         if (curr_link_up != tp->link_up) {
4711                 if (curr_link_up) {
4712                         netif_carrier_on(tp->dev);
4713                 } else {
4714                         netif_carrier_off(tp->dev);
4715                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4716                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4717                 }
4718
4719                 tg3_link_report(tp);
4720                 return true;
4721         }
4722
4723         return false;
4724 }
4725
4726 static void tg3_clear_mac_status(struct tg3 *tp)
4727 {
4728         tw32(MAC_EVENT, 0);
4729
4730         tw32_f(MAC_STATUS,
4731                MAC_STATUS_SYNC_CHANGED |
4732                MAC_STATUS_CFG_CHANGED |
4733                MAC_STATUS_MI_COMPLETION |
4734                MAC_STATUS_LNKSTATE_CHANGED);
4735         udelay(40);
4736 }
4737
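/* Program the CPMU EEE mode and debounce timer registers from the
 * cached tp->eee state, applying the 57765 A0 link-idle quirk and
 * keeping the send-index detect logic off on 5717 parts.
 */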
4738 static void tg3_setup_eee(struct tg3 *tp)
4739 {
4740         u32 val;
4741
4742         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4743               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4744         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4745                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4746
4747         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4748
4749         tw32_f(TG3_CPMU_EEE_CTRL,
4750                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4751
4752         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4753               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4754               TG3_CPMU_EEEMD_LPI_IN_RX |
4755               TG3_CPMU_EEEMD_EEE_ENABLE;
4756
4757         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4758                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4759
4760         if (tg3_flag(tp, ENABLE_APE))
4761                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4762
4763         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4764
4765         tw32_f(TG3_CPMU_EEE_DBTMR1,
4766                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4767                (tp->eee.tx_lpi_timer & 0xffff));
4768
4769         tw32_f(TG3_CPMU_EEE_DBTMR2,
4770                TG3_CPMU_DBTMR2_APE_TX_2047US |
4771                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4772 }
4773
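/* Main link setup/poll routine for copper ports.  This clears the MAC
 * status, resets the PHY when forced (or when certain third-party PHYs
 * lose link), applies the BCM5401 DSP and 5701 CRC workarounds, polls
 * BMSR for link, decodes speed/duplex from the aux status register
 * and, when autoneg is enabled, cross-checks the negotiated result
 * (including EEE) before declaring the link up.  The MAC mode, LED and
 * flow-control settings are then reprogrammed to match, and any link
 * change is reported.
 */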
4774 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4775 {
4776         bool current_link_up;
4777         u32 bmsr, val;
4778         u32 lcl_adv, rmt_adv;
4779         u32 current_speed;
4780         u8 current_duplex;
4781         int i, err;
4782
4783         tg3_clear_mac_status(tp);
4784
4785         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4786                 tw32_f(MAC_MI_MODE,
4787                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4788                 udelay(80);
4789         }
4790
4791         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4792
4793         /* Some third-party PHYs need to be reset when the link
4794          * goes down.
4795          */
4796         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4797              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4798              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4799             tp->link_up) {
4800                 tg3_readphy(tp, MII_BMSR, &bmsr);
4801                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4802                     !(bmsr & BMSR_LSTATUS))
4803                         force_reset = true;
4804         }
4805         if (force_reset)
4806                 tg3_phy_reset(tp);
4807
4808         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4809                 tg3_readphy(tp, MII_BMSR, &bmsr);
4810                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4811                     !tg3_flag(tp, INIT_COMPLETE))
4812                         bmsr = 0;
4813
4814                 if (!(bmsr & BMSR_LSTATUS)) {
4815                         err = tg3_init_5401phy_dsp(tp);
4816                         if (err)
4817                                 return err;
4818
4819                         tg3_readphy(tp, MII_BMSR, &bmsr);
4820                         for (i = 0; i < 1000; i++) {
4821                                 udelay(10);
4822                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4823                                     (bmsr & BMSR_LSTATUS)) {
4824                                         udelay(40);
4825                                         break;
4826                                 }
4827                         }
4828
4829                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4830                             TG3_PHY_REV_BCM5401_B0 &&
4831                             !(bmsr & BMSR_LSTATUS) &&
4832                             tp->link_config.active_speed == SPEED_1000) {
4833                                 err = tg3_phy_reset(tp);
4834                                 if (!err)
4835                                         err = tg3_init_5401phy_dsp(tp);
4836                                 if (err)
4837                                         return err;
4838                         }
4839                 }
4840         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4841                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4842                 /* 5701 {A0,B0} CRC bug workaround */
4843                 tg3_writephy(tp, 0x15, 0x0a75);
4844                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4845                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4846                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4847         }
4848
4849         /* Clear pending interrupts... */
4850         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4851         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4852
4853         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4854                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4855         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4856                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4857
4858         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4859             tg3_asic_rev(tp) == ASIC_REV_5701) {
4860                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4861                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4862                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4863                 else
4864                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4865         }
4866
4867         current_link_up = false;
4868         current_speed = SPEED_UNKNOWN;
4869         current_duplex = DUPLEX_UNKNOWN;
4870         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4871         tp->link_config.rmt_adv = 0;
4872
4873         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4874                 err = tg3_phy_auxctl_read(tp,
4875                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4876                                           &val);
4877                 if (!err && !(val & (1 << 10))) {
4878                         tg3_phy_auxctl_write(tp,
4879                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4880                                              val | (1 << 10));
4881                         goto relink;
4882                 }
4883         }
4884
4885         bmsr = 0;
4886         for (i = 0; i < 100; i++) {
4887                 tg3_readphy(tp, MII_BMSR, &bmsr);
4888                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4889                     (bmsr & BMSR_LSTATUS))
4890                         break;
4891                 udelay(40);
4892         }
4893
4894         if (bmsr & BMSR_LSTATUS) {
4895                 u32 aux_stat, bmcr;
4896
4897                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4898                 for (i = 0; i < 2000; i++) {
4899                         udelay(10);
4900                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4901                             aux_stat)
4902                                 break;
4903                 }
4904
4905                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4906                                              &current_speed,
4907                                              &current_duplex);
4908
4909                 bmcr = 0;
4910                 for (i = 0; i < 200; i++) {
4911                         tg3_readphy(tp, MII_BMCR, &bmcr);
4912                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4913                                 continue;
4914                         if (bmcr && bmcr != 0x7fff)
4915                                 break;
4916                         udelay(10);
4917                 }
4918
4919                 lcl_adv = 0;
4920                 rmt_adv = 0;
4921
4922                 tp->link_config.active_speed = current_speed;
4923                 tp->link_config.active_duplex = current_duplex;
4924
4925                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4926                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4927
4928                         if ((bmcr & BMCR_ANENABLE) &&
4929                             eee_config_ok &&
4930                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4931                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4932                                 current_link_up = true;
4933
4934                         /* Changes to the EEE settings take effect only after
4935                          * a PHY reset.  If we skipped a reset because Link
4936                          * Flap Avoidance is enabled, do it now.
4937                          */
4938                         if (!eee_config_ok &&
4939                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4940                             !force_reset) {
4941                                 tg3_setup_eee(tp);
4942                                 tg3_phy_reset(tp);
4943                         }
4944                 } else {
4945                         if (!(bmcr & BMCR_ANENABLE) &&
4946                             tp->link_config.speed == current_speed &&
4947                             tp->link_config.duplex == current_duplex) {
4948                                 current_link_up = true;
4949                         }
4950                 }
4951
4952                 if (current_link_up &&
4953                     tp->link_config.active_duplex == DUPLEX_FULL) {
4954                         u32 reg, bit;
4955
4956                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4957                                 reg = MII_TG3_FET_GEN_STAT;
4958                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4959                         } else {
4960                                 reg = MII_TG3_EXT_STAT;
4961                                 bit = MII_TG3_EXT_STAT_MDIX;
4962                         }
4963
4964                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4965                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4966
4967                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4968                 }
4969         }
4970
4971 relink:
4972         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4973                 tg3_phy_copper_begin(tp);
4974
4975                 if (tg3_flag(tp, ROBOSWITCH)) {
4976                         current_link_up = true;
4977                         /* FIXME: use 100 Mbit/s when a BCM5325 switch is used */
4978                         current_speed = SPEED_1000;
4979                         current_duplex = DUPLEX_FULL;
4980                         tp->link_config.active_speed = current_speed;
4981                         tp->link_config.active_duplex = current_duplex;
4982                 }
4983
4984                 tg3_readphy(tp, MII_BMSR, &bmsr);
4985                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4986                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4987                         current_link_up = true;
4988         }
4989
4990         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4991         if (current_link_up) {
4992                 if (tp->link_config.active_speed == SPEED_100 ||
4993                     tp->link_config.active_speed == SPEED_10)
4994                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4995                 else
4996                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4997         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4998                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4999         else
5000                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5001
5002         /* For the 5750 core in the BCM4785 chip to work properly
5003          * in RGMII mode, the LED Control Register must be set up.
5004          */
5005         if (tg3_flag(tp, RGMII_MODE)) {
5006                 u32 led_ctrl = tr32(MAC_LED_CTRL);
5007                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5008
5009                 if (tp->link_config.active_speed == SPEED_10)
5010                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5011                 else if (tp->link_config.active_speed == SPEED_100)
5012                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5013                                      LED_CTRL_100MBPS_ON);
5014                 else if (tp->link_config.active_speed == SPEED_1000)
5015                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5016                                      LED_CTRL_1000MBPS_ON);
5017
5018                 tw32(MAC_LED_CTRL, led_ctrl);
5019                 udelay(40);
5020         }
5021
5022         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5023         if (tp->link_config.active_duplex == DUPLEX_HALF)
5024                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5025
5026         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5027                 if (current_link_up &&
5028                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5029                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5030                 else
5031                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5032         }
5033
5034         /* ??? Without this setting the Netgear GA302T PHY does not
5035          * ??? send/receive packets...
5036          */
5037         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5038             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5039                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5040                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5041                 udelay(80);
5042         }
5043
5044         tw32_f(MAC_MODE, tp->mac_mode);
5045         udelay(40);
5046
5047         tg3_phy_eee_adjust(tp, current_link_up);
5048
5049         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5050                 /* Polled via timer. */
5051                 tw32_f(MAC_EVENT, 0);
5052         } else {
5053                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5054         }
5055         udelay(40);
5056
5057         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5058             current_link_up &&
5059             tp->link_config.active_speed == SPEED_1000 &&
5060             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5061                 udelay(120);
5062                 tw32_f(MAC_STATUS,
5063                      (MAC_STATUS_SYNC_CHANGED |
5064                       MAC_STATUS_CFG_CHANGED));
5065                 udelay(40);
5066                 tg3_write_mem(tp,
5067                               NIC_SRAM_FIRMWARE_MBOX,
5068                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5069         }
5070
5071         /* Prevent send BD (buffer descriptor) corruption. */
5072         if (tg3_flag(tp, CLKREQ_BUG)) {
5073                 if (tp->link_config.active_speed == SPEED_100 ||
5074                     tp->link_config.active_speed == SPEED_10)
5075                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5076                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5077                 else
5078                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5079                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5080         }
5081
5082         tg3_test_and_report_link_chg(tp, current_link_up);
5083
5084         return 0;
5085 }
5086
5087 struct tg3_fiber_aneginfo {
5088         int state;
5089 #define ANEG_STATE_UNKNOWN              0
5090 #define ANEG_STATE_AN_ENABLE            1
5091 #define ANEG_STATE_RESTART_INIT         2
5092 #define ANEG_STATE_RESTART              3
5093 #define ANEG_STATE_DISABLE_LINK_OK      4
5094 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5095 #define ANEG_STATE_ABILITY_DETECT       6
5096 #define ANEG_STATE_ACK_DETECT_INIT      7
5097 #define ANEG_STATE_ACK_DETECT           8
5098 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5099 #define ANEG_STATE_COMPLETE_ACK         10
5100 #define ANEG_STATE_IDLE_DETECT_INIT     11
5101 #define ANEG_STATE_IDLE_DETECT          12
5102 #define ANEG_STATE_LINK_OK              13
5103 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5104 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5105
5106         u32 flags;
5107 #define MR_AN_ENABLE            0x00000001
5108 #define MR_RESTART_AN           0x00000002
5109 #define MR_AN_COMPLETE          0x00000004
5110 #define MR_PAGE_RX              0x00000008
5111 #define MR_NP_LOADED            0x00000010
5112 #define MR_TOGGLE_TX            0x00000020
5113 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5114 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5115 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5116 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5117 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5118 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5119 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5120 #define MR_TOGGLE_RX            0x00002000
5121 #define MR_NP_RX                0x00004000
5122
5123 #define MR_LINK_OK              0x80000000
5124
5125         unsigned long link_time, cur_time;
5126
5127         u32 ability_match_cfg;
5128         int ability_match_count;
5129
5130         char ability_match, idle_match, ack_match;
5131
5132         u32 txconfig, rxconfig;
5133 #define ANEG_CFG_NP             0x00000080
5134 #define ANEG_CFG_ACK            0x00000040
5135 #define ANEG_CFG_RF2            0x00000020
5136 #define ANEG_CFG_RF1            0x00000010
5137 #define ANEG_CFG_PS2            0x00000001
5138 #define ANEG_CFG_PS1            0x00008000
5139 #define ANEG_CFG_HD             0x00004000
5140 #define ANEG_CFG_FD             0x00002000
5141 #define ANEG_CFG_INVAL          0x00001f06
5142
5143 };
5144 #define ANEG_OK         0
5145 #define ANEG_DONE       1
5146 #define ANEG_TIMER_ENAB 2
5147 #define ANEG_FAILED     -1
5148
5149 #define ANEG_STATE_SETTLE_TIME  10000
5150
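/* One tick of the software 1000BASE-X autonegotiation state machine
 * (essentially the IEEE 802.3 clause 37 arbitration), driven from
 * fiber_autoneg().  rxconfig mirrors the config word received in
 * MAC_RX_AUTO_NEG; txconfig is what we transmit via MAC_TX_AUTO_NEG.
 * Returns ANEG_OK or ANEG_TIMER_ENAB while negotiation is still in
 * progress, and ANEG_DONE or ANEG_FAILED once it has resolved.
 */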
5151 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5152                                    struct tg3_fiber_aneginfo *ap)
5153 {
5154         u16 flowctrl;
5155         unsigned long delta;
5156         u32 rx_cfg_reg;
5157         int ret;
5158
5159         if (ap->state == ANEG_STATE_UNKNOWN) {
5160                 ap->rxconfig = 0;
5161                 ap->link_time = 0;
5162                 ap->cur_time = 0;
5163                 ap->ability_match_cfg = 0;
5164                 ap->ability_match_count = 0;
5165                 ap->ability_match = 0;
5166                 ap->idle_match = 0;
5167                 ap->ack_match = 0;
5168         }
5169         ap->cur_time++;
5170
5171         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5172                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5173
5174                 if (rx_cfg_reg != ap->ability_match_cfg) {
5175                         ap->ability_match_cfg = rx_cfg_reg;
5176                         ap->ability_match = 0;
5177                         ap->ability_match_count = 0;
5178                 } else {
5179                         if (++ap->ability_match_count > 1) {
5180                                 ap->ability_match = 1;
5181                                 ap->ability_match_cfg = rx_cfg_reg;
5182                         }
5183                 }
5184                 if (rx_cfg_reg & ANEG_CFG_ACK)
5185                         ap->ack_match = 1;
5186                 else
5187                         ap->ack_match = 0;
5188
5189                 ap->idle_match = 0;
5190         } else {
5191                 ap->idle_match = 1;
5192                 ap->ability_match_cfg = 0;
5193                 ap->ability_match_count = 0;
5194                 ap->ability_match = 0;
5195                 ap->ack_match = 0;
5196
5197                 rx_cfg_reg = 0;
5198         }
5199
5200         ap->rxconfig = rx_cfg_reg;
5201         ret = ANEG_OK;
5202
5203         switch (ap->state) {
5204         case ANEG_STATE_UNKNOWN:
5205                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5206                         ap->state = ANEG_STATE_AN_ENABLE;
5207
5208                 fallthrough;
5209         case ANEG_STATE_AN_ENABLE:
5210                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5211                 if (ap->flags & MR_AN_ENABLE) {
5212                         ap->link_time = 0;
5213                         ap->cur_time = 0;
5214                         ap->ability_match_cfg = 0;
5215                         ap->ability_match_count = 0;
5216                         ap->ability_match = 0;
5217                         ap->idle_match = 0;
5218                         ap->ack_match = 0;
5219
5220                         ap->state = ANEG_STATE_RESTART_INIT;
5221                 } else {
5222                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5223                 }
5224                 break;
5225
5226         case ANEG_STATE_RESTART_INIT:
5227                 ap->link_time = ap->cur_time;
5228                 ap->flags &= ~(MR_NP_LOADED);
5229                 ap->txconfig = 0;
5230                 tw32(MAC_TX_AUTO_NEG, 0);
5231                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5232                 tw32_f(MAC_MODE, tp->mac_mode);
5233                 udelay(40);
5234
5235                 ret = ANEG_TIMER_ENAB;
5236                 ap->state = ANEG_STATE_RESTART;
5237
5238                 fallthrough;
5239         case ANEG_STATE_RESTART:
5240                 delta = ap->cur_time - ap->link_time;
5241                 if (delta > ANEG_STATE_SETTLE_TIME)
5242                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5243                 else
5244                         ret = ANEG_TIMER_ENAB;
5245                 break;
5246
5247         case ANEG_STATE_DISABLE_LINK_OK:
5248                 ret = ANEG_DONE;
5249                 break;
5250
5251         case ANEG_STATE_ABILITY_DETECT_INIT:
5252                 ap->flags &= ~(MR_TOGGLE_TX);
5253                 ap->txconfig = ANEG_CFG_FD;
5254                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5255                 if (flowctrl & ADVERTISE_1000XPAUSE)
5256                         ap->txconfig |= ANEG_CFG_PS1;
5257                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5258                         ap->txconfig |= ANEG_CFG_PS2;
5259                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5260                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5261                 tw32_f(MAC_MODE, tp->mac_mode);
5262                 udelay(40);
5263
5264                 ap->state = ANEG_STATE_ABILITY_DETECT;
5265                 break;
5266
5267         case ANEG_STATE_ABILITY_DETECT:
5268                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5269                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5270                 break;
5271
5272         case ANEG_STATE_ACK_DETECT_INIT:
5273                 ap->txconfig |= ANEG_CFG_ACK;
5274                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5275                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5276                 tw32_f(MAC_MODE, tp->mac_mode);
5277                 udelay(40);
5278
5279                 ap->state = ANEG_STATE_ACK_DETECT;
5280
5281                 fallthrough;
5282         case ANEG_STATE_ACK_DETECT:
5283                 if (ap->ack_match != 0) {
5284                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5285                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5286                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5287                         } else {
5288                                 ap->state = ANEG_STATE_AN_ENABLE;
5289                         }
5290                 } else if (ap->ability_match != 0 &&
5291                            ap->rxconfig == 0) {
5292                         ap->state = ANEG_STATE_AN_ENABLE;
5293                 }
5294                 break;
5295
5296         case ANEG_STATE_COMPLETE_ACK_INIT:
5297                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5298                         ret = ANEG_FAILED;
5299                         break;
5300                 }
5301                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5302                                MR_LP_ADV_HALF_DUPLEX |
5303                                MR_LP_ADV_SYM_PAUSE |
5304                                MR_LP_ADV_ASYM_PAUSE |
5305                                MR_LP_ADV_REMOTE_FAULT1 |
5306                                MR_LP_ADV_REMOTE_FAULT2 |
5307                                MR_LP_ADV_NEXT_PAGE |
5308                                MR_TOGGLE_RX |
5309                                MR_NP_RX);
5310                 if (ap->rxconfig & ANEG_CFG_FD)
5311                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5312                 if (ap->rxconfig & ANEG_CFG_HD)
5313                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5314                 if (ap->rxconfig & ANEG_CFG_PS1)
5315                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5316                 if (ap->rxconfig & ANEG_CFG_PS2)
5317                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5318                 if (ap->rxconfig & ANEG_CFG_RF1)
5319                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5320                 if (ap->rxconfig & ANEG_CFG_RF2)
5321                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5322                 if (ap->rxconfig & ANEG_CFG_NP)
5323                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5324
5325                 ap->link_time = ap->cur_time;
5326
5327                 ap->flags ^= (MR_TOGGLE_TX);
5328                 if (ap->rxconfig & 0x0008)
5329                         ap->flags |= MR_TOGGLE_RX;
5330                 if (ap->rxconfig & ANEG_CFG_NP)
5331                         ap->flags |= MR_NP_RX;
5332                 ap->flags |= MR_PAGE_RX;
5333
5334                 ap->state = ANEG_STATE_COMPLETE_ACK;
5335                 ret = ANEG_TIMER_ENAB;
5336                 break;
5337
5338         case ANEG_STATE_COMPLETE_ACK:
5339                 if (ap->ability_match != 0 &&
5340                     ap->rxconfig == 0) {
5341                         ap->state = ANEG_STATE_AN_ENABLE;
5342                         break;
5343                 }
5344                 delta = ap->cur_time - ap->link_time;
5345                 if (delta > ANEG_STATE_SETTLE_TIME) {
5346                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5347                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5348                         } else {
5349                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5350                                     !(ap->flags & MR_NP_RX)) {
5351                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5352                                 } else {
5353                                         ret = ANEG_FAILED;
5354                                 }
5355                         }
5356                 }
5357                 break;
5358
5359         case ANEG_STATE_IDLE_DETECT_INIT:
5360                 ap->link_time = ap->cur_time;
5361                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5362                 tw32_f(MAC_MODE, tp->mac_mode);
5363                 udelay(40);
5364
5365                 ap->state = ANEG_STATE_IDLE_DETECT;
5366                 ret = ANEG_TIMER_ENAB;
5367                 break;
5368
5369         case ANEG_STATE_IDLE_DETECT:
5370                 if (ap->ability_match != 0 &&
5371                     ap->rxconfig == 0) {
5372                         ap->state = ANEG_STATE_AN_ENABLE;
5373                         break;
5374                 }
5375                 delta = ap->cur_time - ap->link_time;
5376                 if (delta > ANEG_STATE_SETTLE_TIME) {
5377                         /* XXX another gem from the Broadcom driver :( */
5378                         ap->state = ANEG_STATE_LINK_OK;
5379                 }
5380                 break;
5381
5382         case ANEG_STATE_LINK_OK:
5383                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5384                 ret = ANEG_DONE;
5385                 break;
5386
5387         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5388                 /* ??? unimplemented */
5389                 break;
5390
5391         case ANEG_STATE_NEXT_PAGE_WAIT:
5392                 /* ??? unimplemented */
5393                 break;
5394
5395         default:
5396                 ret = ANEG_FAILED;
5397                 break;
5398         }
5399
5400         return ret;
5401 }
5402
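/* Run the software autoneg state machine to completion, bounded at
 * roughly 195 ms of 1 us ticks.  Returns nonzero when negotiation
 * finishes with any of the completion, link-OK or full-duplex-partner
 * flags set; the local and partner config words are passed back
 * through *txflags and *rxflags.
 */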
5403 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5404 {
5405         int res = 0;
5406         struct tg3_fiber_aneginfo aninfo;
5407         int status = ANEG_FAILED;
5408         unsigned int tick;
5409         u32 tmp;
5410
5411         tw32_f(MAC_TX_AUTO_NEG, 0);
5412
5413         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5414         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5415         udelay(40);
5416
5417         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5418         udelay(40);
5419
5420         memset(&aninfo, 0, sizeof(aninfo));
5421         aninfo.flags |= MR_AN_ENABLE;
5422         aninfo.state = ANEG_STATE_UNKNOWN;
5423         aninfo.cur_time = 0;
5424         tick = 0;
5425         while (++tick < 195000) {
5426                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5427                 if (status == ANEG_DONE || status == ANEG_FAILED)
5428                         break;
5429
5430                 udelay(1);
5431         }
5432
5433         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5434         tw32_f(MAC_MODE, tp->mac_mode);
5435         udelay(40);
5436
5437         *txflags = aninfo.txconfig;
5438         *rxflags = aninfo.flags;
5439
5440         if (status == ANEG_DONE &&
5441             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5442                              MR_LP_ADV_FULL_DUPLEX)))
5443                 res = 1;
5444
5445         return res;
5446 }
5447
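/* One-time bring-up sequence for the BCM8002 SerDes PHY.  The raw
 * register writes (PLL lock range, comdet enable, POR toggling) are
 * vendor-provided magic, with busy-wait loops standing in for proper
 * sleeps (see the XXX notes below).
 */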
5448 static void tg3_init_bcm8002(struct tg3 *tp)
5449 {
5450         u32 mac_status = tr32(MAC_STATUS);
5451         int i;
5452
5453         /* Reset on first-time init or when we have a link. */
5454         if (tg3_flag(tp, INIT_COMPLETE) &&
5455             !(mac_status & MAC_STATUS_PCS_SYNCED))
5456                 return;
5457
5458         /* Set PLL lock range. */
5459         tg3_writephy(tp, 0x16, 0x8007);
5460
5461         /* SW reset */
5462         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5463
5464         /* Wait for reset to complete. */
5465         /* XXX schedule_timeout() ... */
5466         for (i = 0; i < 500; i++)
5467                 udelay(10);
5468
5469         /* Config mode; select PMA/Ch 1 regs. */
5470         tg3_writephy(tp, 0x10, 0x8411);
5471
5472         /* Enable auto-lock and comdet, select txclk for tx. */
5473         tg3_writephy(tp, 0x11, 0x0a10);
5474
5475         tg3_writephy(tp, 0x18, 0x00a0);
5476         tg3_writephy(tp, 0x16, 0x41ff);
5477
5478         /* Assert and deassert POR. */
5479         tg3_writephy(tp, 0x13, 0x0400);
5480         udelay(40);
5481         tg3_writephy(tp, 0x13, 0x0000);
5482
5483         tg3_writephy(tp, 0x11, 0x0a50);
5484         udelay(40);
5485         tg3_writephy(tp, 0x11, 0x0a10);
5486
5487         /* Wait for the signal to stabilize. */
5488         /* XXX schedule_timeout() ... */
5489         for (i = 0; i < 15000; i++)
5490                 udelay(10);
5491
5492         /* Deselect the channel register so we can read the PHYID
5493          * later.
5494          */
5495         tg3_writephy(tp, 0x10, 0x8011);
5496 }
5497
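/* Fiber link setup using the hardware SG DIG autoneg block.  In
 * forced mode the block is torn down and the link follows PCS sync;
 * in autoneg mode the expected SG_DIG_CTRL value is built from the
 * flow-control advertisement and hardware autoneg is restarted
 * whenever the register disagrees (with a serdes config workaround on
 * all but the 5704 A0/A1 revisions).  A partner that never completes
 * autoneg can still bring the link up via parallel detection.
 */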
5498 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5499 {
5500         u16 flowctrl;
5501         bool current_link_up;
5502         u32 sg_dig_ctrl, sg_dig_status;
5503         u32 serdes_cfg, expected_sg_dig_ctrl;
5504         int workaround, port_a;
5505
5506         serdes_cfg = 0;
5507         workaround = 0;
5508         port_a = 1;
5509         current_link_up = false;
5510
5511         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5512             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5513                 workaround = 1;
5514                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5515                         port_a = 0;
5516
5517                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5518                 /* preserve bits 20-23 for voltage regulator */
5519                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5520         }
5521
5522         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5523
5524         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5525                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5526                         if (workaround) {
5527                                 u32 val = serdes_cfg;
5528
5529                                 if (port_a)
5530                                         val |= 0xc010000;
5531                                 else
5532                                         val |= 0x4010000;
5533                                 tw32_f(MAC_SERDES_CFG, val);
5534                         }
5535
5536                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5537                 }
5538                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5539                         tg3_setup_flow_control(tp, 0, 0);
5540                         current_link_up = true;
5541                 }
5542                 goto out;
5543         }
5544
5545         /* Want auto-negotiation.  */
5546         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5547
5548         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5549         if (flowctrl & ADVERTISE_1000XPAUSE)
5550                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5551         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5552                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5553
5554         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5555                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5556                     tp->serdes_counter &&
5557                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5558                                     MAC_STATUS_RCVD_CFG)) ==
5559                      MAC_STATUS_PCS_SYNCED)) {
5560                         tp->serdes_counter--;
5561                         current_link_up = true;
5562                         goto out;
5563                 }
5564 restart_autoneg:
5565                 if (workaround)
5566                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5567                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5568                 udelay(5);
5569                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5570
5571                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5572                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5573         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5574                                  MAC_STATUS_SIGNAL_DET)) {
5575                 sg_dig_status = tr32(SG_DIG_STATUS);
5576                 mac_status = tr32(MAC_STATUS);
5577
5578                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5579                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5580                         u32 local_adv = 0, remote_adv = 0;
5581
5582                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5583                                 local_adv |= ADVERTISE_1000XPAUSE;
5584                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5585                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5586
5587                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5588                                 remote_adv |= LPA_1000XPAUSE;
5589                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5590                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5591
5592                         tp->link_config.rmt_adv =
5593                                            mii_adv_to_ethtool_adv_x(remote_adv);
5594
5595                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5596                         current_link_up = true;
5597                         tp->serdes_counter = 0;
5598                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5599                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5600                         if (tp->serdes_counter)
5601                                 tp->serdes_counter--;
5602                         else {
5603                                 if (workaround) {
5604                                         u32 val = serdes_cfg;
5605
5606                                         if (port_a)
5607                                                 val |= 0xc010000;
5608                                         else
5609                                                 val |= 0x4010000;
5610
5611                                         tw32_f(MAC_SERDES_CFG, val);
5612                                 }
5613
5614                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5615                                 udelay(40);
5616
5617                                 /* Link parallel detection: the link is up
5618                                  * only if we have PCS_SYNC and are not
5619                                  * receiving config code words.  */
5620                                 mac_status = tr32(MAC_STATUS);
5621                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5622                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5623                                         tg3_setup_flow_control(tp, 0, 0);
5624                                         current_link_up = true;
5625                                         tp->phy_flags |=
5626                                                 TG3_PHYFLG_PARALLEL_DETECT;
5627                                         tp->serdes_counter =
5628                                                 SERDES_PARALLEL_DET_TIMEOUT;
5629                                 } else
5630                                         goto restart_autoneg;
5631                         }
5632                 }
5633         } else {
5634                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5635                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5636         }
5637
5638 out:
5639         return current_link_up;
5640 }
5641
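/* Fiber link setup without the hardware autoneg block: when autoneg
 * is enabled, run the software state machine and resolve flow control
 * from the exchanged config words, falling back to parallel detection
 * if the partner sends no config; otherwise simply force a 1000FD
 * link up.
 */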
5642 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5643 {
5644         bool current_link_up = false;
5645
5646         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5647                 goto out;
5648
5649         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5650                 u32 txflags, rxflags;
5651                 int i;
5652
5653                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5654                         u32 local_adv = 0, remote_adv = 0;
5655
5656                         if (txflags & ANEG_CFG_PS1)
5657                                 local_adv |= ADVERTISE_1000XPAUSE;
5658                         if (txflags & ANEG_CFG_PS2)
5659                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5660
5661                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5662                                 remote_adv |= LPA_1000XPAUSE;
5663                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5664                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5665
5666                         tp->link_config.rmt_adv =
5667                                            mii_adv_to_ethtool_adv_x(remote_adv);
5668
5669                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5670
5671                         current_link_up = true;
5672                 }
5673                 for (i = 0; i < 30; i++) {
5674                         udelay(20);
5675                         tw32_f(MAC_STATUS,
5676                                (MAC_STATUS_SYNC_CHANGED |
5677                                 MAC_STATUS_CFG_CHANGED));
5678                         udelay(40);
5679                         if ((tr32(MAC_STATUS) &
5680                              (MAC_STATUS_SYNC_CHANGED |
5681                               MAC_STATUS_CFG_CHANGED)) == 0)
5682                                 break;
5683                 }
5684
5685                 mac_status = tr32(MAC_STATUS);
5686                 if (!current_link_up &&
5687                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5688                     !(mac_status & MAC_STATUS_RCVD_CFG))
5689                         current_link_up = true;
5690         } else {
5691                 tg3_setup_flow_control(tp, 0, 0);
5692
5693                 /* Forcing 1000FD link up. */
5694                 current_link_up = true;
5695
5696                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5697                 udelay(40);
5698
5699                 tw32_f(MAC_MODE, tp->mac_mode);
5700                 udelay(40);
5701         }
5702
5703 out:
5704         return current_link_up;
5705 }
5706
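/* Top-level link setup for TBI/fiber ports.  Takes a fast exit when
 * the link is already up and clean, otherwise switches the MAC into
 * TBI mode, runs hardware or software autonegotiation as appropriate,
 * waits for the MAC status to settle, and finally updates the LEDs
 * and reports any resulting change in link, speed, duplex or flow
 * control.
 */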
5707 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5708 {
5709         u32 orig_pause_cfg;
5710         u32 orig_active_speed;
5711         u8 orig_active_duplex;
5712         u32 mac_status;
5713         bool current_link_up;
5714         int i;
5715
5716         orig_pause_cfg = tp->link_config.active_flowctrl;
5717         orig_active_speed = tp->link_config.active_speed;
5718         orig_active_duplex = tp->link_config.active_duplex;
5719
5720         if (!tg3_flag(tp, HW_AUTONEG) &&
5721             tp->link_up &&
5722             tg3_flag(tp, INIT_COMPLETE)) {
5723                 mac_status = tr32(MAC_STATUS);
5724                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5725                                MAC_STATUS_SIGNAL_DET |
5726                                MAC_STATUS_CFG_CHANGED |
5727                                MAC_STATUS_RCVD_CFG);
5728                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5729                                    MAC_STATUS_SIGNAL_DET)) {
5730                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5731                                             MAC_STATUS_CFG_CHANGED));
5732                         return 0;
5733                 }
5734         }
5735
5736         tw32_f(MAC_TX_AUTO_NEG, 0);
5737
5738         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5739         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5740         tw32_f(MAC_MODE, tp->mac_mode);
5741         udelay(40);
5742
5743         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5744                 tg3_init_bcm8002(tp);
5745
5746         /* Enable link change events even when polling the serdes. */
5747         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5748         udelay(40);
5749
5750         tp->link_config.rmt_adv = 0;
5751         mac_status = tr32(MAC_STATUS);
5752
5753         if (tg3_flag(tp, HW_AUTONEG))
5754                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5755         else
5756                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5757
5758         tp->napi[0].hw_status->status =
5759                 (SD_STATUS_UPDATED |
5760                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5761
5762         for (i = 0; i < 100; i++) {
5763                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5764                                     MAC_STATUS_CFG_CHANGED));
5765                 udelay(5);
5766                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5767                                          MAC_STATUS_CFG_CHANGED |
5768                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5769                         break;
5770         }
5771
5772         mac_status = tr32(MAC_STATUS);
5773         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5774                 current_link_up = false;
5775                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5776                     tp->serdes_counter == 0) {
5777                         tw32_f(MAC_MODE, (tp->mac_mode |
5778                                           MAC_MODE_SEND_CONFIGS));
5779                         udelay(1);
5780                         tw32_f(MAC_MODE, tp->mac_mode);
5781                 }
5782         }
5783
5784         if (current_link_up) {
5785                 tp->link_config.active_speed = SPEED_1000;
5786                 tp->link_config.active_duplex = DUPLEX_FULL;
5787                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5788                                     LED_CTRL_LNKLED_OVERRIDE |
5789                                     LED_CTRL_1000MBPS_ON));
5790         } else {
5791                 tp->link_config.active_speed = SPEED_UNKNOWN;
5792                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5793                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5794                                     LED_CTRL_LNKLED_OVERRIDE |
5795                                     LED_CTRL_TRAFFIC_OVERRIDE));
5796         }
5797
5798         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5799                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5800                 if (orig_pause_cfg != now_pause_cfg ||
5801                     orig_active_speed != tp->link_config.active_speed ||
5802                     orig_active_duplex != tp->link_config.active_duplex)
5803                         tg3_link_report(tp);
5804         }
5805
5806         return 0;
5807 }
5808
5809 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5810 {
5811         int err = 0;
5812         u32 bmsr, bmcr;
5813         u32 current_speed = SPEED_UNKNOWN;
5814         u8 current_duplex = DUPLEX_UNKNOWN;
5815         bool current_link_up = false;
5816         u32 local_adv, remote_adv, sgsr;
5817
5818         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5819              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5820              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5821              (sgsr & SERDES_TG3_SGMII_MODE)) {
5822
5823                 if (force_reset)
5824                         tg3_phy_reset(tp);
5825
5826                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5827
5828                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5829                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5830                 } else {
5831                         current_link_up = true;
5832                         if (sgsr & SERDES_TG3_SPEED_1000) {
5833                                 current_speed = SPEED_1000;
5834                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5835                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5836                                 current_speed = SPEED_100;
5837                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5838                         } else {
5839                                 current_speed = SPEED_10;
5840                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5841                         }
5842
5843                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5844                                 current_duplex = DUPLEX_FULL;
5845                         else
5846                                 current_duplex = DUPLEX_HALF;
5847                 }
5848
5849                 tw32_f(MAC_MODE, tp->mac_mode);
5850                 udelay(40);
5851
5852                 tg3_clear_mac_status(tp);
5853
5854                 goto fiber_setup_done;
5855         }
5856
5857         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5858         tw32_f(MAC_MODE, tp->mac_mode);
5859         udelay(40);
5860
5861         tg3_clear_mac_status(tp);
5862
5863         if (force_reset)
5864                 tg3_phy_reset(tp);
5865
5866         tp->link_config.rmt_adv = 0;
5867
5868         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5869         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5870         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5871                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5872                         bmsr |= BMSR_LSTATUS;
5873                 else
5874                         bmsr &= ~BMSR_LSTATUS;
5875         }
5876
5877         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5878
5879         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5880             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5881                 /* do nothing, just check for link up at the end */
5882         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5883                 u32 adv, newadv;
5884
5885                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5886                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5887                                  ADVERTISE_1000XPAUSE |
5888                                  ADVERTISE_1000XPSE_ASYM |
5889                                  ADVERTISE_SLCT);
5890
5891                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5892                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5893
5894                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5895                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5896                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5897                         tg3_writephy(tp, MII_BMCR, bmcr);
5898
5899                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5900                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5901                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5902
5903                         return err;
5904                 }
5905         } else {
5906                 u32 new_bmcr;
5907
5908                 bmcr &= ~BMCR_SPEED1000;
5909                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5910
5911                 if (tp->link_config.duplex == DUPLEX_FULL)
5912                         new_bmcr |= BMCR_FULLDPLX;
5913
5914                 if (new_bmcr != bmcr) {
5915                         /* BMCR_SPEED1000 is a reserved bit that needs
5916                          * to be set on write.
5917                          */
5918                         new_bmcr |= BMCR_SPEED1000;
5919
5920                         /* Force a linkdown */
5921                         if (tp->link_up) {
5922                                 u32 adv;
5923
5924                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5925                                 adv &= ~(ADVERTISE_1000XFULL |
5926                                          ADVERTISE_1000XHALF |
5927                                          ADVERTISE_SLCT);
5928                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5929                                 tg3_writephy(tp, MII_BMCR, bmcr |
5930                                                            BMCR_ANRESTART |
5931                                                            BMCR_ANENABLE);
5932                                 udelay(10);
5933                                 tg3_carrier_off(tp);
5934                         }
5935                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5936                         bmcr = new_bmcr;
5937                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5938                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5939                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5940                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5941                                         bmsr |= BMSR_LSTATUS;
5942                                 else
5943                                         bmsr &= ~BMSR_LSTATUS;
5944                         }
5945                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5946                 }
5947         }
5948
5949         if (bmsr & BMSR_LSTATUS) {
5950                 current_speed = SPEED_1000;
5951                 current_link_up = true;
5952                 if (bmcr & BMCR_FULLDPLX)
5953                         current_duplex = DUPLEX_FULL;
5954                 else
5955                         current_duplex = DUPLEX_HALF;
5956
5957                 local_adv = 0;
5958                 remote_adv = 0;
5959
5960                 if (bmcr & BMCR_ANENABLE) {
5961                         u32 common;
5962
5963                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5964                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5965                         common = local_adv & remote_adv;
5966                         if (common & (ADVERTISE_1000XHALF |
5967                                       ADVERTISE_1000XFULL)) {
5968                                 if (common & ADVERTISE_1000XFULL)
5969                                         current_duplex = DUPLEX_FULL;
5970                                 else
5971                                         current_duplex = DUPLEX_HALF;
5972
5973                                 tp->link_config.rmt_adv =
5974                                            mii_adv_to_ethtool_adv_x(remote_adv);
5975                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5976                                 /* Link is up via parallel detect */
5977                         } else {
5978                                 current_link_up = false;
5979                         }
5980                 }
5981         }
5982
5983 fiber_setup_done:
5984         if (current_link_up && current_duplex == DUPLEX_FULL)
5985                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5986
5987         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5988         if (tp->link_config.active_duplex == DUPLEX_HALF)
5989                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5990
5991         tw32_f(MAC_MODE, tp->mac_mode);
5992         udelay(40);
5993
5994         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5995
5996         tp->link_config.active_speed = current_speed;
5997         tp->link_config.active_duplex = current_duplex;
5998
5999         tg3_test_and_report_link_chg(tp, current_link_up);
6000         return err;
6001 }
6002
6003 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6004 {
6005         if (tp->serdes_counter) {
6006                 /* Give autoneg time to complete. */
6007                 tp->serdes_counter--;
6008                 return;
6009         }
6010
6011         if (!tp->link_up &&
6012             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6013                 u32 bmcr;
6014
6015                 tg3_readphy(tp, MII_BMCR, &bmcr);
6016                 if (bmcr & BMCR_ANENABLE) {
6017                         u32 phy1, phy2;
6018
6019                         /* Select shadow register 0x1f */
6020                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6021                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6022
6023                         /* Select expansion interrupt status register */
6024                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6025                                          MII_TG3_DSP_EXP1_INT_STAT);
6026                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6027                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6028
6029                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6030                                 /* We have signal detect but are not receiving
6031                                  * config code words, so the link is up via
6032                                  * parallel detection.
6033                                  */
6034
6035                                 bmcr &= ~BMCR_ANENABLE;
6036                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6037                                 tg3_writephy(tp, MII_BMCR, bmcr);
6038                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6039                         }
6040                 }
6041         } else if (tp->link_up &&
6042                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6043                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6044                 u32 phy2;
6045
6046                 /* Select expansion interrupt status register */
6047                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6048                                  MII_TG3_DSP_EXP1_INT_STAT);
6049                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6050                 if (phy2 & 0x20) {
6051                         u32 bmcr;
6052
6053                         /* Config code words received, turn on autoneg. */
6054                         tg3_readphy(tp, MII_BMCR, &bmcr);
6055                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6056
6057                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6058
6059                 }
6060         }
6061 }
6062
6063 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6064 {
6065         u32 val;
6066         int err;
6067
6068         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6069                 err = tg3_setup_fiber_phy(tp, force_reset);
6070         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6071                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6072         else
6073                 err = tg3_setup_copper_phy(tp, force_reset);
6074
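        /* On 5784 AX the GRC timer prescaler must track the current MAC
         * clock.  The values below appear to program roughly one tick per
         * microsecond for each supported clock rate (62.5 MHz / 65,
         * 6.25 MHz / 6, 12.5 MHz / 12); this is an inference from the
         * constants, not from documentation.
         */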
6075         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6076                 u32 scale;
6077
6078                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6079                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6080                         scale = 65;
6081                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6082                         scale = 6;
6083                 else
6084                         scale = 12;
6085
6086                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6087                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6088                 tw32(GRC_MISC_CFG, val);
6089         }
6090
6091         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6092               (6 << TX_LENGTHS_IPG_SHIFT);
6093         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6094             tg3_asic_rev(tp) == ASIC_REV_5762)
6095                 val |= tr32(MAC_TX_LENGTHS) &
6096                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6097                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6098
6099         if (tp->link_config.active_speed == SPEED_1000 &&
6100             tp->link_config.active_duplex == DUPLEX_HALF)
6101                 tw32(MAC_TX_LENGTHS, val |
6102                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6103         else
6104                 tw32(MAC_TX_LENGTHS, val |
6105                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6106
6107         if (!tg3_flag(tp, 5705_PLUS)) {
6108                 if (tp->link_up) {
6109                         tw32(HOSTCC_STAT_COAL_TICKS,
6110                              tp->coal.stats_block_coalesce_usecs);
6111                 } else {
6112                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6113                 }
6114         }
6115
6116         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6117                 val = tr32(PCIE_PWR_MGMT_THRESH);
6118                 if (!tp->link_up)
6119                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6120                               tp->pwrmgmt_thresh;
6121                 else
6122                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6123                 tw32(PCIE_PWR_MGMT_THRESH, val);
6124         }
6125
6126         return err;
6127 }
6128
6129 /* tp->lock must be held */
6130 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6131 {
6132         u64 stamp;
6133
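        /* Bracket the LSB read with system timestamps so gettimex64
         * callers can correlate device time with system time; the LSB
         * read is presumably the access that samples the counter, with
         * the MSB read returning the latched upper half.
         */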
6134         ptp_read_system_prets(sts);
6135         stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6136         ptp_read_system_postts(sts);
6137         stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6138
6139         return stamp;
6140 }
6141
6142 /* tp->lock must be held */
6143 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6144 {
6145         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6146
6147         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6148         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6149         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6150         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6151 }
6152
6153 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6154 static inline void tg3_full_unlock(struct tg3 *tp);
6155 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6156 {
6157         struct tg3 *tp = netdev_priv(dev);
6158
6159         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6160                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6161                                 SOF_TIMESTAMPING_SOFTWARE;
6162
6163         if (tg3_flag(tp, PTP_CAPABLE)) {
6164                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6165                                         SOF_TIMESTAMPING_RX_HARDWARE |
6166                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6167         }
6168
6169         if (tp->ptp_clock)
6170                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6171         else
6172                 info->phc_index = -1;
6173
6174         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6175
6176         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6177                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6178                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6179                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6180         return 0;
6181 }
6182
6183 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6184 {
6185         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6186         u64 correction;
6187         bool neg_adj;
6188
6189         /* Frequency adjustment is performed using hardware with a 24 bit
6190          * accumulator and a programmable correction value. On each clock
6191          * cycle, the correction value is added to the accumulator and when it
6192          * overflows, the time counter is incremented/decremented.
6193          */
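        /* Worked example (illustrative numbers, not from a datasheet):
         * diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction) computes
         * correction = 2^24 * |scaled_ppm| / (10^6 * 2^16).  A +1 ppm
         * request (scaled_ppm == 65536) therefore yields correction == 16,
         * so the 24-bit accumulator overflows on roughly 16 / 2^24 ~= 1 ppm
         * of the clock cycles, adjusting the counter by about 1 ppm.
         */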
6194         neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6195
6196         tg3_full_lock(tp, 0);
6197
6198         if (correction)
6199                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6200                      TG3_EAV_REF_CLK_CORRECT_EN |
6201                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6202                      ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6203         else
6204                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6205
6206         tg3_full_unlock(tp);
6207
6208         return 0;
6209 }
6210
6211 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6212 {
6213         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6214
6215         tg3_full_lock(tp, 0);
6216         tp->ptp_adjust += delta;
6217         tg3_full_unlock(tp);
6218
6219         return 0;
6220 }
6221
6222 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6223                             struct ptp_system_timestamp *sts)
6224 {
6225         u64 ns;
6226         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6227
6228         tg3_full_lock(tp, 0);
6229         ns = tg3_refclk_read(tp, sts);
6230         ns += tp->ptp_adjust;
6231         tg3_full_unlock(tp);
6232
6233         *ts = ns_to_timespec64(ns);
6234
6235         return 0;
6236 }
6237
6238 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6239                            const struct timespec64 *ts)
6240 {
6241         u64 ns;
6242         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6243
6244         ns = timespec64_to_ns(ts);
6245
6246         tg3_full_lock(tp, 0);
6247         tg3_refclk_write(tp, ns);
6248         tp->ptp_adjust = 0;
6249         tg3_full_unlock(tp);
6250
6251         return 0;
6252 }
6253
6254 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6255                           struct ptp_clock_request *rq, int on)
6256 {
6257         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6258         u32 clock_ctl;
6259         int rval = 0;
6260
6261         switch (rq->type) {
6262         case PTP_CLK_REQ_PEROUT:
6263                 /* Reject requests with unsupported flags */
6264                 if (rq->perout.flags)
6265                         return -EOPNOTSUPP;
6266
6267                 if (rq->perout.index != 0)
6268                         return -EINVAL;
6269
6270                 tg3_full_lock(tp, 0);
6271                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6272                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6273
6274                 if (on) {
6275                         u64 nsec;
6276
6277                         nsec = rq->perout.start.sec * 1000000000ULL +
6278                                rq->perout.start.nsec;
6279
6280                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6281                                 netdev_warn(tp->dev,
6282                                             "Device supports only a one-shot timesync output; period must be 0\n");
6283                                 rval = -EINVAL;
6284                                 goto err_out;
6285                         }
6286
6287                         if (nsec & (1ULL << 63)) {
6288                                 netdev_warn(tp->dev,
6289                                             "Start value (nsec) is over limit; it must fit in 63 bits\n");
6290                                 rval = -EINVAL;
6291                                 goto err_out;
6292                         }
6293
6294                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6295                         tw32(TG3_EAV_WATCHDOG0_MSB,
6296                              TG3_EAV_WATCHDOG0_EN |
6297                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6298
6299                         tw32(TG3_EAV_REF_CLCK_CTL,
6300                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6301                 } else {
6302                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6303                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6304                 }
6305
6306 err_out:
6307                 tg3_full_unlock(tp);
6308                 return rval;
6309
6310         default:
6311                 break;
6312         }
6313
6314         return -EOPNOTSUPP;
6315 }
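
/* For reference, a userspace one-shot pulse request would look roughly
 * like the (hypothetical) sketch below: period must be zero because the
 * hardware watchdog fires only once, and start must fit in 63 bits.
 *
 *	struct ptp_perout_request req = {
 *		.start  = { .sec = when_sec, .nsec = when_nsec },
 *		.period = { 0 },	// one-shot only
 *		.index  = 0,
 *	};
 *	if (ioctl(ptp_fd, PTP_PEROUT_REQUEST, &req) < 0)
 *		perror("PTP_PEROUT_REQUEST");
 */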
6316
6317 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6318                                      struct skb_shared_hwtstamps *timestamp)
6319 {
6320         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6321         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6322                                            tp->ptp_adjust);
6323 }
6324
6325 static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock)
6326 {
6327         *hwclock = tr32(TG3_TX_TSTAMP_LSB);
6328         *hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6329 }
6330
6331 static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp)
6332 {
6333         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6334         struct skb_shared_hwtstamps timestamp;
6335         u64 hwclock;
6336
6337         if (tp->ptp_txts_retrycnt > 2)
6338                 goto done;
6339
6340         tg3_read_tx_tstamp(tp, &hwclock);
6341
6342         if (hwclock != tp->pre_tx_ts) {
6343                 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6344                 skb_tstamp_tx(tp->tx_tstamp_skb, &timestamp);
6345                 goto done;
6346         }
6347         tp->ptp_txts_retrycnt++;
6348         return HZ / 10;
6349 done:
6350         dev_consume_skb_any(tp->tx_tstamp_skb);
6351         tp->tx_tstamp_skb = NULL;
6352         tp->ptp_txts_retrycnt = 0;
6353         tp->pre_tx_ts = 0;
6354         return -1;
6355 }
6356
6357 static const struct ptp_clock_info tg3_ptp_caps = {
6358         .owner          = THIS_MODULE,
6359         .name           = "tg3 clock",
6360         .max_adj        = 250000000,
6361         .n_alarm        = 0,
6362         .n_ext_ts       = 0,
6363         .n_per_out      = 1,
6364         .n_pins         = 0,
6365         .pps            = 0,
6366         .adjfine        = tg3_ptp_adjfine,
6367         .adjtime        = tg3_ptp_adjtime,
6368         .do_aux_work    = tg3_ptp_ts_aux_work,
6369         .gettimex64     = tg3_ptp_gettimex,
6370         .settime64      = tg3_ptp_settime,
6371         .enable         = tg3_ptp_enable,
6372 };
6373
6374 /* tp->lock must be held */
6375 static void tg3_ptp_init(struct tg3 *tp)
6376 {
6377         if (!tg3_flag(tp, PTP_CAPABLE))
6378                 return;
6379
6380         /* Initialize the hardware clock to the system time. */
6381         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6382         tp->ptp_adjust = 0;
6383         tp->ptp_info = tg3_ptp_caps;
6384 }
6385
6386 /* tp->lock must be held */
6387 static void tg3_ptp_resume(struct tg3 *tp)
6388 {
6389         if (!tg3_flag(tp, PTP_CAPABLE))
6390                 return;
6391
6392         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6393         tp->ptp_adjust = 0;
6394 }
6395
6396 static void tg3_ptp_fini(struct tg3 *tp)
6397 {
6398         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6399                 return;
6400
6401         ptp_clock_unregister(tp->ptp_clock);
6402         tp->ptp_clock = NULL;
6403         tp->ptp_adjust = 0;
6404         dev_consume_skb_any(tp->tx_tstamp_skb);
6405         tp->tx_tstamp_skb = NULL;
6406 }
6407
6408 static inline int tg3_irq_sync(struct tg3 *tp)
6409 {
6410         return tp->irq_sync;
6411 }
6412
6413 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6414 {
6415         int i;
6416
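        /* Bias the destination pointer by the register offset so that
         * regs[off / 4] ends up holding the register at offset 'off';
         * the caller's dump buffer is thus indexed by register offset
         * (tg3_dump_state prints the index * 4 as the address).
         */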
6417         dst = (u32 *)((u8 *)dst + off);
6418         for (i = 0; i < len; i += sizeof(u32))
6419                 *dst++ = tr32(off + i);
6420 }
6421
6422 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6423 {
6424         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6425         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6426         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6427         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6428         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6429         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6430         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6431         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6432         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6433         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6434         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6435         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6436         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6437         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6438         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6439         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6440         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6441         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6442         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6443
6444         if (tg3_flag(tp, SUPPORT_MSIX))
6445                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6446
6447         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6448         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6449         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6450         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6451         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6452         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6453         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6454         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6455
6456         if (!tg3_flag(tp, 5705_PLUS)) {
6457                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6458                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6459                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6460         }
6461
6462         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6463         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6464         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6465         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6466         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6467
6468         if (tg3_flag(tp, NVRAM))
6469                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6470 }
6471
6472 static void tg3_dump_state(struct tg3 *tp)
6473 {
6474         int i;
6475         u32 *regs;
6476
6477         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6478         if (!regs)
6479                 return;
6480
6481         if (tg3_flag(tp, PCI_EXPRESS)) {
6482                 /* Read up to but not including private PCI registers */
6483                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6484                         regs[i / sizeof(u32)] = tr32(i);
6485         } else
6486                 tg3_dump_legacy_regs(tp, regs);
6487
6488         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6489                 if (!regs[i + 0] && !regs[i + 1] &&
6490                     !regs[i + 2] && !regs[i + 3])
6491                         continue;
6492
6493                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6494                            i * 4,
6495                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6496         }
6497
6498         kfree(regs);
6499
6500         for (i = 0; i < tp->irq_cnt; i++) {
6501                 struct tg3_napi *tnapi = &tp->napi[i];
6502
6503                 /* SW status block */
6504                 netdev_err(tp->dev,
6505                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6506                            i,
6507                            tnapi->hw_status->status,
6508                            tnapi->hw_status->status_tag,
6509                            tnapi->hw_status->rx_jumbo_consumer,
6510                            tnapi->hw_status->rx_consumer,
6511                            tnapi->hw_status->rx_mini_consumer,
6512                            tnapi->hw_status->idx[0].rx_producer,
6513                            tnapi->hw_status->idx[0].tx_consumer);
6514
6515                 netdev_err(tp->dev,
6516                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6517                            i,
6518                            tnapi->last_tag, tnapi->last_irq_tag,
6519                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6520                            tnapi->rx_rcb_ptr,
6521                            tnapi->prodring.rx_std_prod_idx,
6522                            tnapi->prodring.rx_std_cons_idx,
6523                            tnapi->prodring.rx_jmb_prod_idx,
6524                            tnapi->prodring.rx_jmb_cons_idx);
6525         }
6526 }
6527
6528 /* This is called whenever we suspect that the system chipset is re-
6529  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6530  * is bogus tx completions. We try to recover by setting the
6531  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6532  * in the workqueue.
6533  */
6534 static void tg3_tx_recover(struct tg3 *tp)
6535 {
6536         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6537                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6538
6539         netdev_warn(tp->dev,
6540                     "The system may be re-ordering memory-mapped I/O "
6541                     "cycles to the network device, attempting to recover. "
6542                     "Please report the problem to the driver maintainer "
6543                     "and include system chipset information.\n");
6544
6545         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6546 }
6547
6548 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6549 {
6550         /* Tell compiler to fetch tx indices from memory. */
6551         barrier();
6552         return tnapi->tx_pending -
6553                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6554 }
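
/* Illustration (hypothetical values): with tx_pending == 511, tx_prod == 10
 * and tx_cons == 500 on a 512-entry ring, (10 - 500) & 511 == 22 descriptors
 * are still in flight, leaving 511 - 22 == 489 available.
 */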
6555
6556 /* Tigon3 never reports partial packet sends.  So we do not
6557  * need special logic to handle SKBs that have not had all
6558  * of their frags sent yet, like SunGEM does.
6559  */
6560 static void tg3_tx(struct tg3_napi *tnapi)
6561 {
6562         struct tg3 *tp = tnapi->tp;
6563         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6564         u32 sw_idx = tnapi->tx_cons;
6565         struct netdev_queue *txq;
6566         int index = tnapi - tp->napi;
6567         unsigned int pkts_compl = 0, bytes_compl = 0;
6568
6569         if (tg3_flag(tp, ENABLE_TSS))
6570                 index--;
6571
6572         txq = netdev_get_tx_queue(tp->dev, index);
6573
6574         while (sw_idx != hw_idx) {
6575                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6576                 bool complete_skb_later = false;
6577                 struct sk_buff *skb = ri->skb;
6578                 int i, tx_bug = 0;
6579
6580                 if (unlikely(skb == NULL)) {
6581                         tg3_tx_recover(tp);
6582                         return;
6583                 }
6584
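                /* If the TX timestamp register still holds the value seen
                 * before this send (tp->pre_tx_ts), the hardware has not
                 * produced the new timestamp yet; hold on to the skb and
                 * let the PTP aux worker retry (see tg3_ptp_ts_aux_work).
                 */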
6585                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6586                         struct skb_shared_hwtstamps timestamp;
6587                         u64 hwclock;
6588
6589                         tg3_read_tx_tstamp(tp, &hwclock);
6590                         if (hwclock != tp->pre_tx_ts) {
6591                                 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6592                                 skb_tstamp_tx(skb, &timestamp);
6593                                 tp->pre_tx_ts = 0;
6594                         } else {
6595                                 tp->tx_tstamp_skb = skb;
6596                                 complete_skb_later = true;
6597                         }
6598                 }
6599
6600                 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6601                                  skb_headlen(skb), DMA_TO_DEVICE);
6602
6603                 ri->skb = NULL;
6604
6605                 while (ri->fragmented) {
6606                         ri->fragmented = false;
6607                         sw_idx = NEXT_TX(sw_idx);
6608                         ri = &tnapi->tx_buffers[sw_idx];
6609                 }
6610
6611                 sw_idx = NEXT_TX(sw_idx);
6612
6613                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6614                         ri = &tnapi->tx_buffers[sw_idx];
6615                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6616                                 tx_bug = 1;
6617
6618                         dma_unmap_page(&tp->pdev->dev,
6619                                        dma_unmap_addr(ri, mapping),
6620                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6621                                        DMA_TO_DEVICE);
6622
6623                         while (ri->fragmented) {
6624                                 ri->fragmented = false;
6625                                 sw_idx = NEXT_TX(sw_idx);
6626                                 ri = &tnapi->tx_buffers[sw_idx];
6627                         }
6628
6629                         sw_idx = NEXT_TX(sw_idx);
6630                 }
6631
6632                 pkts_compl++;
6633                 bytes_compl += skb->len;
6634
6635                 if (!complete_skb_later)
6636                         dev_consume_skb_any(skb);
6637                 else
6638                         ptp_schedule_worker(tp->ptp_clock, 0);
6639
6640                 if (unlikely(tx_bug)) {
6641                         tg3_tx_recover(tp);
6642                         return;
6643                 }
6644         }
6645
6646         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6647
6648         tnapi->tx_cons = sw_idx;
6649
6650         /* Need to make the tx_cons update visible to __tg3_start_xmit()
6651          * before checking for netif_queue_stopped().  Without the
6652          * memory barrier, there is a small possibility that __tg3_start_xmit()
6653          * will miss it and cause the queue to be stopped forever.
6654          */
6655         smp_mb();
6656
6657         if (unlikely(netif_tx_queue_stopped(txq) &&
6658                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6659                 __netif_tx_lock(txq, smp_processor_id());
6660                 if (netif_tx_queue_stopped(txq) &&
6661                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6662                         netif_tx_wake_queue(txq);
6663                 __netif_tx_unlock(txq);
6664         }
6665 }
6666
6667 static void tg3_frag_free(bool is_frag, void *data)
6668 {
6669         if (is_frag)
6670                 skb_free_frag(data);
6671         else
6672                 kfree(data);
6673 }
6674
6675 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6676 {
6677         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6678                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6679
6680         if (!ri->data)
6681                 return;
6682
6683         dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6684                          DMA_FROM_DEVICE);
6685         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6686         ri->data = NULL;
6687 }
6688
6689
6690 /* Returns size of skb allocated or < 0 on error.
6691  *
6692  * We only need to fill in the address because the other members
6693  * of the RX descriptor are invariant, see tg3_init_rings.
6694  *
6695  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6696  * posting buffers we only dirty the first cache line of the RX
6697  * descriptor (containing the address).  Whereas for the RX status
6698  * buffers the cpu only reads the last cacheline of the RX descriptor
6699  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6700  */
6701 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6702                              u32 opaque_key, u32 dest_idx_unmasked,
6703                              unsigned int *frag_size)
6704 {
6705         struct tg3_rx_buffer_desc *desc;
6706         struct ring_info *map;
6707         u8 *data;
6708         dma_addr_t mapping;
6709         int skb_size, data_size, dest_idx;
6710
6711         switch (opaque_key) {
6712         case RXD_OPAQUE_RING_STD:
6713                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6714                 desc = &tpr->rx_std[dest_idx];
6715                 map = &tpr->rx_std_buffers[dest_idx];
6716                 data_size = tp->rx_pkt_map_sz;
6717                 break;
6718
6719         case RXD_OPAQUE_RING_JUMBO:
6720                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6721                 desc = &tpr->rx_jmb[dest_idx].std;
6722                 map = &tpr->rx_jmb_buffers[dest_idx];
6723                 data_size = TG3_RX_JMB_MAP_SZ;
6724                 break;
6725
6726         default:
6727                 return -EINVAL;
6728         }
6729
6730         /* Do not overwrite any of the map or rp information
6731          * until we are sure we can commit to a new buffer.
6732          *
6733          * Callers depend upon this behavior and assume that
6734          * we leave everything unchanged if we fail.
6735          */
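        /* The allocation is laid out so that build_skb() can wrap it
         * directly:
         *
         *   [ TG3_RX_OFFSET headroom | data_size DMA area | skb_shared_info ]
         *
         * with both halves rounded up by SKB_DATA_ALIGN.
         */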
6736         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6737                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6738         if (skb_size <= PAGE_SIZE) {
6739                 data = napi_alloc_frag(skb_size);
6740                 *frag_size = skb_size;
6741         } else {
6742                 data = kmalloc(skb_size, GFP_ATOMIC);
6743                 *frag_size = 0;
6744         }
6745         if (!data)
6746                 return -ENOMEM;
6747
6748         mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6749                                  data_size, DMA_FROM_DEVICE);
6750         if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6751                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6752                 return -EIO;
6753         }
6754
6755         map->data = data;
6756         dma_unmap_addr_set(map, mapping, mapping);
6757
6758         desc->addr_hi = ((u64)mapping >> 32);
6759         desc->addr_lo = ((u64)mapping & 0xffffffff);
6760
6761         return data_size;
6762 }
6763
6764 /* We only need to move over in the address because the other
6765  * members of the RX descriptor are invariant.  See notes above
6766  * tg3_alloc_rx_data for full details.
6767  */
6768 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6769                            struct tg3_rx_prodring_set *dpr,
6770                            u32 opaque_key, int src_idx,
6771                            u32 dest_idx_unmasked)
6772 {
6773         struct tg3 *tp = tnapi->tp;
6774         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6775         struct ring_info *src_map, *dest_map;
6776         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6777         int dest_idx;
6778
6779         switch (opaque_key) {
6780         case RXD_OPAQUE_RING_STD:
6781                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6782                 dest_desc = &dpr->rx_std[dest_idx];
6783                 dest_map = &dpr->rx_std_buffers[dest_idx];
6784                 src_desc = &spr->rx_std[src_idx];
6785                 src_map = &spr->rx_std_buffers[src_idx];
6786                 break;
6787
6788         case RXD_OPAQUE_RING_JUMBO:
6789                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6790                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6791                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6792                 src_desc = &spr->rx_jmb[src_idx].std;
6793                 src_map = &spr->rx_jmb_buffers[src_idx];
6794                 break;
6795
6796         default:
6797                 return;
6798         }
6799
6800         dest_map->data = src_map->data;
6801         dma_unmap_addr_set(dest_map, mapping,
6802                            dma_unmap_addr(src_map, mapping));
6803         dest_desc->addr_hi = src_desc->addr_hi;
6804         dest_desc->addr_lo = src_desc->addr_lo;
6805
6806         /* Ensure that the update to the skb happens after the physical
6807          * addresses have been transferred to the new BD location.
6808          */
6809         smp_wmb();
6810
6811         src_map->data = NULL;
6812 }
6813
6814 /* The RX ring scheme is composed of multiple rings which post fresh
6815  * buffers to the chip, and one special ring the chip uses to report
6816  * status back to the host.
6817  *
6818  * The special ring reports the status of received packets to the
6819  * host.  The chip does not write into the original descriptor the
6820  * RX buffer was obtained from.  The chip simply takes the original
6821  * descriptor as provided by the host, updates the status and length
6822  * field, then writes this into the next status ring entry.
6823  *
6824  * Each ring the host uses to post buffers to the chip is described
6825  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6826  * it is first placed into on-chip RAM.  When the packet's length
6827  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
6828  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6829  * whose MAXLEN covers the new packet's length is chosen.
6830  *
6831  * The "separate ring for rx status" scheme may sound strange, but it makes
6832  * sense from a cache coherency perspective.  If only the host writes
6833  * to the buffer post rings, and only the chip writes to the rx status
6834  * rings, then cache lines never move beyond shared-modified state.
6835  * If both the host and chip were to write into the same ring, cache line
6836  * eviction could occur since both entities want it in an exclusive state.
6837  */
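
/* Rough sketch of the flow described above:
 *
 *   host                                  chip
 *   producer (std/jumbo) rings  --DMA-->  on-chip packet buffer
 *                                            |
 *   return (status) ring       <--DMA--  completion with len/flags/opaque
 *
 * Only the host writes the producer rings and only the chip writes the
 * return ring, which is what keeps the cache lines from ping-ponging.
 */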
6838 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6839 {
6840         struct tg3 *tp = tnapi->tp;
6841         u32 work_mask, rx_std_posted = 0;
6842         u32 std_prod_idx, jmb_prod_idx;
6843         u32 sw_idx = tnapi->rx_rcb_ptr;
6844         u16 hw_idx;
6845         int received;
6846         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6847
6848         hw_idx = *(tnapi->rx_rcb_prod_idx);
6849         /*
6850          * We need to order the read of hw_idx and the read of
6851          * the opaque cookie.
6852          */
6853         rmb();
6854         work_mask = 0;
6855         received = 0;
6856         std_prod_idx = tpr->rx_std_prod_idx;
6857         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6858         while (sw_idx != hw_idx && budget > 0) {
6859                 struct ring_info *ri;
6860                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6861                 unsigned int len;
6862                 struct sk_buff *skb;
6863                 dma_addr_t dma_addr;
6864                 u32 opaque_key, desc_idx, *post_ptr;
6865                 u8 *data;
6866                 u64 tstamp = 0;
6867
6868                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6869                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6870                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6871                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6872                         dma_addr = dma_unmap_addr(ri, mapping);
6873                         data = ri->data;
6874                         post_ptr = &std_prod_idx;
6875                         rx_std_posted++;
6876                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6877                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6878                         dma_addr = dma_unmap_addr(ri, mapping);
6879                         data = ri->data;
6880                         post_ptr = &jmb_prod_idx;
6881                 } else
6882                         goto next_pkt_nopost;
6883
6884                 work_mask |= opaque_key;
6885
6886                 if (desc->err_vlan & RXD_ERR_MASK) {
6887                 drop_it:
6888                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6889                                        desc_idx, *post_ptr);
6890                 drop_it_no_recycle:
6891                         /* Other statistics are tracked by the card. */
6892                         tp->rx_dropped++;
6893                         goto next_pkt;
6894                 }
6895
6896                 prefetch(data + TG3_RX_OFFSET(tp));
6897                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6898                       ETH_FCS_LEN;
6899
6900                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6901                      RXD_FLAG_PTPSTAT_PTPV1 ||
6902                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6903                      RXD_FLAG_PTPSTAT_PTPV2) {
6904                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6905                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6906                 }
6907
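                /* Copybreak: large packets keep the DMA buffer and we post
                 * a replacement; small packets are copied into a fresh skb
                 * so the (larger) original buffer can be recycled in place.
                 */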
6908                 if (len > TG3_RX_COPY_THRESH(tp)) {
6909                         int skb_size;
6910                         unsigned int frag_size;
6911
6912                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6913                                                     *post_ptr, &frag_size);
6914                         if (skb_size < 0)
6915                                 goto drop_it;
6916
6917                         dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6918                                          DMA_FROM_DEVICE);
6919
6920                         /* Ensure that the update to the data happens
6921                          * after the usage of the old DMA mapping.
6922                          */
6923                         smp_wmb();
6924
6925                         ri->data = NULL;
6926
6927                         if (frag_size)
6928                                 skb = build_skb(data, frag_size);
6929                         else
6930                                 skb = slab_build_skb(data);
6931                         if (!skb) {
6932                                 tg3_frag_free(frag_size != 0, data);
6933                                 goto drop_it_no_recycle;
6934                         }
6935                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6936                 } else {
6937                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6938                                        desc_idx, *post_ptr);
6939
6940                         skb = netdev_alloc_skb(tp->dev,
6941                                                len + TG3_RAW_IP_ALIGN);
6942                         if (skb == NULL)
6943                                 goto drop_it_no_recycle;
6944
6945                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6946                         dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6947                                                 DMA_FROM_DEVICE);
6948                         memcpy(skb->data,
6949                                data + TG3_RX_OFFSET(tp),
6950                                len);
6951                         dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6952                                                    len, DMA_FROM_DEVICE);
6953                 }
6954
6955                 skb_put(skb, len);
6956                 if (tstamp)
6957                         tg3_hwclock_to_timestamp(tp, tstamp,
6958                                                  skb_hwtstamps(skb));
6959
6960                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6961                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6962                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6963                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6964                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6965                 else
6966                         skb_checksum_none_assert(skb);
6967
6968                 skb->protocol = eth_type_trans(skb, tp->dev);
6969
6970                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6971                     skb->protocol != htons(ETH_P_8021Q) &&
6972                     skb->protocol != htons(ETH_P_8021AD)) {
6973                         dev_kfree_skb_any(skb);
6974                         goto drop_it_no_recycle;
6975                 }
6976
6977                 if (desc->type_flags & RXD_FLAG_VLAN &&
6978                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6979                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6980                                                desc->err_vlan & RXD_VLAN_MASK);
6981
6982                 napi_gro_receive(&tnapi->napi, skb);
6983
6984                 received++;
6985                 budget--;
6986
6987 next_pkt:
6988                 (*post_ptr)++;
6989
6990                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6991                         tpr->rx_std_prod_idx = std_prod_idx &
6992                                                tp->rx_std_ring_mask;
6993                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6994                                      tpr->rx_std_prod_idx);
6995                         work_mask &= ~RXD_OPAQUE_RING_STD;
6996                         rx_std_posted = 0;
6997                 }
6998 next_pkt_nopost:
6999                 sw_idx++;
7000                 sw_idx &= tp->rx_ret_ring_mask;
7001
7002                 /* Refresh hw_idx to see if there is new work */
7003                 if (sw_idx == hw_idx) {
7004                         hw_idx = *(tnapi->rx_rcb_prod_idx);
7005                         rmb();
7006                 }
7007         }
7008
7009         /* ACK the status ring. */
7010         tnapi->rx_rcb_ptr = sw_idx;
7011         tw32_rx_mbox(tnapi->consmbox, sw_idx);
7012
7013         /* Refill RX ring(s). */
7014         if (!tg3_flag(tp, ENABLE_RSS)) {
7015                 /* Sync BD data before updating mailbox */
7016                 wmb();
7017
7018                 if (work_mask & RXD_OPAQUE_RING_STD) {
7019                         tpr->rx_std_prod_idx = std_prod_idx &
7020                                                tp->rx_std_ring_mask;
7021                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7022                                      tpr->rx_std_prod_idx);
7023                 }
7024                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
7025                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
7026                                                tp->rx_jmb_ring_mask;
7027                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7028                                      tpr->rx_jmb_prod_idx);
7029                 }
7030         } else if (work_mask) {
7031                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
7032                  * updated before the producer indices can be updated.
7033                  */
7034                 smp_wmb();
7035
7036                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7037                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7038
7039                 if (tnapi != &tp->napi[1]) {
7040                         tp->rx_refill = true;
7041                         napi_schedule(&tp->napi[1].napi);
7042                 }
7043         }
7044
7045         return received;
7046 }
7047
7048 static void tg3_poll_link(struct tg3 *tp)
7049 {
7050         /* handle link change and other phy events */
7051         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7052                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7053
7054                 if (sblk->status & SD_STATUS_LINK_CHG) {
7055                         sblk->status = SD_STATUS_UPDATED |
7056                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7057                         spin_lock(&tp->lock);
7058                         if (tg3_flag(tp, USE_PHYLIB)) {
7059                                 tw32_f(MAC_STATUS,
7060                                      (MAC_STATUS_SYNC_CHANGED |
7061                                       MAC_STATUS_CFG_CHANGED |
7062                                       MAC_STATUS_MI_COMPLETION |
7063                                       MAC_STATUS_LNKSTATE_CHANGED));
7064                                 udelay(40);
7065                         } else
7066                                 tg3_setup_phy(tp, false);
7067                         spin_unlock(&tp->lock);
7068                 }
7069         }
7070 }
7071
7072 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7073                                 struct tg3_rx_prodring_set *dpr,
7074                                 struct tg3_rx_prodring_set *spr)
7075 {
7076         u32 si, di, cpycnt, src_prod_idx;
7077         int i, err = 0;
7078
7079         while (1) {
7080                 src_prod_idx = spr->rx_std_prod_idx;
7081
7082                 /* Make sure updates to the rx_std_buffers[] entries and the
7083                  * standard producer index are seen in the correct order.
7084                  */
7085                 smp_rmb();
7086
7087                 if (spr->rx_std_cons_idx == src_prod_idx)
7088                         break;
7089
7090                 if (spr->rx_std_cons_idx < src_prod_idx)
7091                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7092                 else
7093                         cpycnt = tp->rx_std_ring_mask + 1 -
7094                                  spr->rx_std_cons_idx;
7095
7096                 cpycnt = min(cpycnt,
7097                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7098
7099                 si = spr->rx_std_cons_idx;
7100                 di = dpr->rx_std_prod_idx;
7101
7102                 for (i = di; i < di + cpycnt; i++) {
7103                         if (dpr->rx_std_buffers[i].data) {
7104                                 cpycnt = i - di;
7105                                 err = -ENOSPC;
7106                                 break;
7107                         }
7108                 }
7109
7110                 if (!cpycnt)
7111                         break;
7112
7113                 /* Ensure that updates to the rx_std_buffers ring and the
7114                  * shadowed hardware producer ring from tg3_recycle_skb() are
7115                  * ordered correctly WRT the skb check above.
7116                  */
7117                 smp_rmb();
7118
7119                 memcpy(&dpr->rx_std_buffers[di],
7120                        &spr->rx_std_buffers[si],
7121                        cpycnt * sizeof(struct ring_info));
7122
7123                 for (i = 0; i < cpycnt; i++, di++, si++) {
7124                         struct tg3_rx_buffer_desc *sbd, *dbd;
7125                         sbd = &spr->rx_std[si];
7126                         dbd = &dpr->rx_std[di];
7127                         dbd->addr_hi = sbd->addr_hi;
7128                         dbd->addr_lo = sbd->addr_lo;
7129                 }
7130
7131                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7132                                        tp->rx_std_ring_mask;
7133                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7134                                        tp->rx_std_ring_mask;
7135         }
7136
7137         while (1) {
7138                 src_prod_idx = spr->rx_jmb_prod_idx;
7139
7140                 /* Make sure updates to the rx_jmb_buffers[] entries and
7141                  * the jumbo producer index are seen in the correct order.
7142                  */
7143                 smp_rmb();
7144
7145                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7146                         break;
7147
7148                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7149                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7150                 else
7151                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7152                                  spr->rx_jmb_cons_idx;
7153
7154                 cpycnt = min(cpycnt,
7155                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7156
7157                 si = spr->rx_jmb_cons_idx;
7158                 di = dpr->rx_jmb_prod_idx;
7159
7160                 for (i = di; i < di + cpycnt; i++) {
7161                         if (dpr->rx_jmb_buffers[i].data) {
7162                                 cpycnt = i - di;
7163                                 err = -ENOSPC;
7164                                 break;
7165                         }
7166                 }
7167
7168                 if (!cpycnt)
7169                         break;
7170
7171                 /* Ensure that updates to the rx_jmb_buffers ring and the
7172                  * shadowed hardware producer ring from tg3_recycle_skb() are
7173                  * ordered correctly WRT the skb check above.
7174                  */
7175                 smp_rmb();
7176
7177                 memcpy(&dpr->rx_jmb_buffers[di],
7178                        &spr->rx_jmb_buffers[si],
7179                        cpycnt * sizeof(struct ring_info));
7180
7181                 for (i = 0; i < cpycnt; i++, di++, si++) {
7182                         struct tg3_rx_buffer_desc *sbd, *dbd;
7183                         sbd = &spr->rx_jmb[si].std;
7184                         dbd = &dpr->rx_jmb[di].std;
7185                         dbd->addr_hi = sbd->addr_hi;
7186                         dbd->addr_lo = sbd->addr_lo;
7187                 }
7188
7189                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7190                                        tp->rx_jmb_ring_mask;
7191                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7192                                        tp->rx_jmb_ring_mask;
7193         }
7194
7195         return err;
7196 }
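/* Illustrative sketch (not part of the driver): the cpycnt computation
 * above copies only the contiguous run between consumer and producer so
 * a single memcpy() never wraps.  With a 512-entry ring (mask == 511):
 *
 *	cons = 500, prod = 20  ->  cpycnt = 512 - 500 = 12  (up to ring end)
 *	cons = 10,  prod = 20  ->  cpycnt = 20 - 10   = 10
 *
 * and it is further clipped against the destination's own distance to
 * its ring end.  Any wrapped remainder is picked up by the next pass of
 * the enclosing while (1) loop.
 */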
7197
7198 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7199 {
7200         struct tg3 *tp = tnapi->tp;
7201
7202         /* run TX completion thread */
7203         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7204                 tg3_tx(tnapi);
7205                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7206                         return work_done;
7207         }
7208
7209         if (!tnapi->rx_rcb_prod_idx)
7210                 return work_done;
7211
7212         /* Run the RX thread, within the bounds set by NAPI.
7213          * All RX "locking" is done by ensuring that outside
7214          * code synchronizes with tg3->napi.poll().
7215          */
7216         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7217                 work_done += tg3_rx(tnapi, budget - work_done);
7218
7219         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7220                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7221                 int i, err = 0;
7222                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7223                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7224
7225                 tp->rx_refill = false;
7226                 for (i = 1; i <= tp->rxq_cnt; i++)
7227                         err |= tg3_rx_prodring_xfer(tp, dpr,
7228                                                     &tp->napi[i].prodring);
7229
7230                 wmb();
7231
7232                 if (std_prod_idx != dpr->rx_std_prod_idx)
7233                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7234                                      dpr->rx_std_prod_idx);
7235
7236                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7237                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7238                                      dpr->rx_jmb_prod_idx);
7239
7240                 if (err)
7241                         tw32_f(HOSTCC_MODE, tp->coal_now);
7242         }
7243
7244         return work_done;
7245 }
7246
7247 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7248 {
7249         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7250                 schedule_work(&tp->reset_task);
7251 }
7252
7253 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7254 {
7255         if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7256                 cancel_work_sync(&tp->reset_task);
7257         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7258 }
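/* Illustrative note (not from the original source): test_and_set_bit()
 * and test_and_clear_bit() are atomic read-modify-writes, so the pair
 * above is safe against concurrent callers: however many error paths
 * fire at once, exactly one wins the bit and the reset task is
 * scheduled at most once until the bit is cleared again.
 */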
7259
7260 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7261 {
7262         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7263         struct tg3 *tp = tnapi->tp;
7264         int work_done = 0;
7265         struct tg3_hw_status *sblk = tnapi->hw_status;
7266
7267         while (1) {
7268                 work_done = tg3_poll_work(tnapi, work_done, budget);
7269
7270                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7271                         goto tx_recovery;
7272
7273                 if (unlikely(work_done >= budget))
7274                         break;
7275
7276                 /* tp->last_tag is used in tg3_int_reenable() below
7277                  * to tell the hw how much work has been processed,
7278                  * so we must read it before checking for more work.
7279                  */
7280                 tnapi->last_tag = sblk->status_tag;
7281                 tnapi->last_irq_tag = tnapi->last_tag;
7282                 rmb();
7283
7284                 /* check for RX/TX work to do */
7285                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7286                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7287
7288                         /* This test is not race free, but looping
7289                          * again reduces the number of interrupts.
7290                          */
7291                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7292                                 continue;
7293
7294                         napi_complete_done(napi, work_done);
7295                         /* Reenable interrupts. */
7296                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7297
7298                         /* This test is synchronized by napi_schedule()
7299                          * and napi_complete() to close the race condition.
7300                          */
7301                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7302                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7303                                                   HOSTCC_MODE_ENABLE |
7304                                                   tnapi->coal_now);
7305                         }
7306                         break;
7307                 }
7308         }
7309
7310         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7311         return work_done;
7312
7313 tx_recovery:
7314         /* work_done is guaranteed to be less than budget. */
7315         napi_complete(napi);
7316         tg3_reset_task_schedule(tp);
7317         return work_done;
7318 }
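/* Illustrative sketch (not part of the driver): one interleaving that
 * the rx_refill re-check after napi_complete_done() guards against:
 *
 *	napi[1] poll                      another vector's tg3_rx()
 *	----------------------------      --------------------------
 *	sees no work, rx_refill false
 *	                                  tp->rx_refill = true;
 *	                                  napi_schedule(&napi[1]);
 *	napi_complete_done(...)
 *	re-check: rx_refill is true  ->   kick HOSTCC_MODE to force a
 *	                                  fresh interrupt and re-poll
 */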
7319
7320 static void tg3_process_error(struct tg3 *tp)
7321 {
7322         u32 val;
7323         bool real_error = false;
7324
7325         if (tg3_flag(tp, ERROR_PROCESSED))
7326                 return;
7327
7328         /* Check Flow Attention register */
7329         val = tr32(HOSTCC_FLOW_ATTN);
7330         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7331                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7332                 real_error = true;
7333         }
7334
7335         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7336                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7337                 real_error = true;
7338         }
7339
7340         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7341                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7342                 real_error = true;
7343         }
7344
7345         if (!real_error)
7346                 return;
7347
7348         tg3_dump_state(tp);
7349
7350         tg3_flag_set(tp, ERROR_PROCESSED);
7351         tg3_reset_task_schedule(tp);
7352 }
7353
7354 static int tg3_poll(struct napi_struct *napi, int budget)
7355 {
7356         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7357         struct tg3 *tp = tnapi->tp;
7358         int work_done = 0;
7359         struct tg3_hw_status *sblk = tnapi->hw_status;
7360
7361         while (1) {
7362                 if (sblk->status & SD_STATUS_ERROR)
7363                         tg3_process_error(tp);
7364
7365                 tg3_poll_link(tp);
7366
7367                 work_done = tg3_poll_work(tnapi, work_done, budget);
7368
7369                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7370                         goto tx_recovery;
7371
7372                 if (unlikely(work_done >= budget))
7373                         break;
7374
7375                 if (tg3_flag(tp, TAGGED_STATUS)) {
7376                         /* tp->last_tag is used in tg3_int_reenable() below
7377                          * to tell the hw how much work has been processed,
7378                          * so we must read it before checking for more work.
7379                          */
7380                         tnapi->last_tag = sblk->status_tag;
7381                         tnapi->last_irq_tag = tnapi->last_tag;
7382                         rmb();
7383                 } else
7384                         sblk->status &= ~SD_STATUS_UPDATED;
7385
7386                 if (likely(!tg3_has_work(tnapi))) {
7387                         napi_complete_done(napi, work_done);
7388                         tg3_int_reenable(tnapi);
7389                         break;
7390                 }
7391         }
7392
7393         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7394         return work_done;
7395
7396 tx_recovery:
7397         /* work_done is guaranteed to be less than budget. */
7398         napi_complete(napi);
7399         tg3_reset_task_schedule(tp);
7400         return work_done;
7401 }
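/* Illustrative note (not from the original source): with TAGGED_STATUS
 * the hardware stamps each status block update with an incrementing
 * tag.  Writing last_tag back through the interrupt mailbox (shifted
 * into the high byte, see tg3_poll_msix()) tells the chip how far the
 * driver has processed; the chip only re-asserts the interrupt if
 * events newer than that tag exist.
 */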
7402
7403 static void tg3_napi_disable(struct tg3 *tp)
7404 {
7405         int i;
7406
7407         for (i = tp->irq_cnt - 1; i >= 0; i--)
7408                 napi_disable(&tp->napi[i].napi);
7409 }
7410
7411 static void tg3_napi_enable(struct tg3 *tp)
7412 {
7413         int i;
7414
7415         for (i = 0; i < tp->irq_cnt; i++)
7416                 napi_enable(&tp->napi[i].napi);
7417 }
7418
7419 static void tg3_napi_init(struct tg3 *tp)
7420 {
7421         int i;
7422
7423         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
7424         for (i = 1; i < tp->irq_cnt; i++)
7425                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
7426 }
7427
7428 static void tg3_napi_fini(struct tg3 *tp)
7429 {
7430         int i;
7431
7432         for (i = 0; i < tp->irq_cnt; i++)
7433                 netif_napi_del(&tp->napi[i].napi);
7434 }
7435
7436 static inline void tg3_netif_stop(struct tg3 *tp)
7437 {
7438         netif_trans_update(tp->dev);    /* prevent tx timeout */
7439         tg3_napi_disable(tp);
7440         netif_carrier_off(tp->dev);
7441         netif_tx_disable(tp->dev);
7442 }
7443
7444 /* tp->lock must be held */
7445 static inline void tg3_netif_start(struct tg3 *tp)
7446 {
7447         tg3_ptp_resume(tp);
7448
7449         /* NOTE: unconditional netif_tx_wake_all_queues is only
7450          * appropriate so long as all callers are assured to
7451          * have free tx slots (such as after tg3_init_hw)
7452          */
7453         netif_tx_wake_all_queues(tp->dev);
7454
7455         if (tp->link_up)
7456                 netif_carrier_on(tp->dev);
7457
7458         tg3_napi_enable(tp);
7459         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7460         tg3_enable_ints(tp);
7461 }
7462
7463 static void tg3_irq_quiesce(struct tg3 *tp)
7464         __releases(tp->lock)
7465         __acquires(tp->lock)
7466 {
7467         int i;
7468
7469         BUG_ON(tp->irq_sync);
7470
7471         tp->irq_sync = 1;
7472         smp_mb();
7473
7474         spin_unlock_bh(&tp->lock);
7475
7476         for (i = 0; i < tp->irq_cnt; i++)
7477                 synchronize_irq(tp->napi[i].irq_vec);
7478
7479         spin_lock_bh(&tp->lock);
7480 }
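/* Illustrative note (not from the original source): tp->lock is
 * deliberately dropped around the synchronize_irq() loop above because
 * synchronize_irq() may sleep while waiting for in-flight handlers to
 * finish, which is not allowed under a spinlock.  irq_sync is set
 * first, so handlers that fire in the window see tg3_irq_sync() and
 * back off.
 */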
7481
7482 /* Fully shut down all tg3 driver activity elsewhere in the system.
7483  * If irq_sync is non-zero, the IRQ handlers must be synchronized with
7484  * as well.  This is usually only necessary when shutting down the
7485  * device.
7486  */
7487 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7488 {
7489         spin_lock_bh(&tp->lock);
7490         if (irq_sync)
7491                 tg3_irq_quiesce(tp);
7492 }
7493
7494 static inline void tg3_full_unlock(struct tg3 *tp)
7495 {
7496         spin_unlock_bh(&tp->lock);
7497 }
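/* Illustrative usage sketch (not part of the driver) for the helpers
 * above, e.g. around a hardware reconfiguration that must not race the
 * IRQ handlers:
 *
 *	tg3_full_lock(tp, 1);	take tp->lock and quiesce all vectors
 *	...reprogram the chip...
 *	tg3_full_unlock(tp);
 *
 * Callers that only need mutual exclusion with the timer and other
 * slow paths pass irq_sync == 0 and skip the quiesce.
 */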
7498
7499 /* One-shot MSI handler - the chip automatically disables the
7500  * interrupt after sending the MSI, so the driver doesn't have to.
7501  */
7502 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7503 {
7504         struct tg3_napi *tnapi = dev_id;
7505         struct tg3 *tp = tnapi->tp;
7506
7507         prefetch(tnapi->hw_status);
7508         if (tnapi->rx_rcb)
7509                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7510
7511         if (likely(!tg3_irq_sync(tp)))
7512                 napi_schedule(&tnapi->napi);
7513
7514         return IRQ_HANDLED;
7515 }
7516
7517 /* MSI ISR - No need to check for interrupt sharing and no need to
7518  * flush status block and interrupt mailbox. PCI ordering rules
7519  * guarantee that MSI will arrive after the status block.
7520  */
7521 static irqreturn_t tg3_msi(int irq, void *dev_id)
7522 {
7523         struct tg3_napi *tnapi = dev_id;
7524         struct tg3 *tp = tnapi->tp;
7525
7526         prefetch(tnapi->hw_status);
7527         if (tnapi->rx_rcb)
7528                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7529         /*
7530          * Writing any value to intr-mbox-0 clears PCI INTA# and
7531          * chip-internal interrupt pending events.
7532          * Writing non-zero to intr-mbox-0 additionally tells the
7533          * NIC to stop sending us irqs, engaging "in-intr-handler"
7534          * event coalescing.
7535          */
7536         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7537         if (likely(!tg3_irq_sync(tp)))
7538                 napi_schedule(&tnapi->napi);
7539
7540         return IRQ_RETVAL(1);
7541 }
7542
7543 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7544 {
7545         struct tg3_napi *tnapi = dev_id;
7546         struct tg3 *tp = tnapi->tp;
7547         struct tg3_hw_status *sblk = tnapi->hw_status;
7548         unsigned int handled = 1;
7549
7550         /* In INTx mode, it is possible for the interrupt to arrive at
7551          * the CPU before the status block that was posted prior to it.
7552          * Reading the PCI State register will confirm whether the
7553          * interrupt is ours and will flush the status block.
7554          */
7555         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7556                 if (tg3_flag(tp, CHIP_RESETTING) ||
7557                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7558                         handled = 0;
7559                         goto out;
7560                 }
7561         }
7562
7563         /*
7564          * Writing any value to intr-mbox-0 clears PCI INTA# and
7565          * chip-internal interrupt pending events.
7566          * Writing non-zero to intr-mbox-0 additionally tells the
7567          * NIC to stop sending us irqs, engaging "in-intr-handler"
7568          * event coalescing.
7569          *
7570          * Flush the mailbox to de-assert the IRQ immediately to prevent
7571          * spurious interrupts.  The flush impacts performance but
7572          * excessive spurious interrupts can be worse in some cases.
7573          */
7574         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7575         if (tg3_irq_sync(tp))
7576                 goto out;
7577         sblk->status &= ~SD_STATUS_UPDATED;
7578         if (likely(tg3_has_work(tnapi))) {
7579                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7580                 napi_schedule(&tnapi->napi);
7581         } else {
7582                 /* No work, shared interrupt perhaps?  re-enable
7583                  * interrupts, and flush that PCI write
7584                  */
7585                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7586                                0x00000000);
7587         }
7588 out:
7589         return IRQ_RETVAL(handled);
7590 }
7591
7592 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7593 {
7594         struct tg3_napi *tnapi = dev_id;
7595         struct tg3 *tp = tnapi->tp;
7596         struct tg3_hw_status *sblk = tnapi->hw_status;
7597         unsigned int handled = 1;
7598
7599         /* In INTx mode, it is possible for the interrupt to arrive at
7600          * the CPU before the status block that was posted prior to it.
7601          * Reading the PCI State register will confirm whether the
7602          * interrupt is ours and will flush the status block.
7603          */
7604         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7605                 if (tg3_flag(tp, CHIP_RESETTING) ||
7606                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7607                         handled = 0;
7608                         goto out;
7609                 }
7610         }
7611
7612         /*
7613          * Writing any value to intr-mbox-0 clears PCI INTA# and
7614          * chip-internal interrupt pending events.
7615          * Writing non-zero to intr-mbox-0 additionally tells the
7616          * NIC to stop sending us irqs, engaging "in-intr-handler"
7617          * event coalescing.
7618          *
7619          * Flush the mailbox to de-assert the IRQ immediately to prevent
7620          * spurious interrupts.  The flush impacts performance but
7621          * excessive spurious interrupts can be worse in some cases.
7622          */
7623         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7624
7625         /*
7626          * In a shared interrupt configuration, sometimes other devices'
7627          * interrupts will scream.  We record the current status tag here
7628          * so that the above check can report that the screaming interrupts
7629          * are unhandled.  Eventually they will be silenced.
7630          */
7631         tnapi->last_irq_tag = sblk->status_tag;
7632
7633         if (tg3_irq_sync(tp))
7634                 goto out;
7635
7636         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7637
7638         napi_schedule(&tnapi->napi);
7639
7640 out:
7641         return IRQ_RETVAL(handled);
7642 }
7643
7644 /* ISR for interrupt test */
7645 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7646 {
7647         struct tg3_napi *tnapi = dev_id;
7648         struct tg3 *tp = tnapi->tp;
7649         struct tg3_hw_status *sblk = tnapi->hw_status;
7650
7651         if ((sblk->status & SD_STATUS_UPDATED) ||
7652             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7653                 tg3_disable_ints(tp);
7654                 return IRQ_RETVAL(1);
7655         }
7656         return IRQ_RETVAL(0);
7657 }
7658
7659 #ifdef CONFIG_NET_POLL_CONTROLLER
7660 static void tg3_poll_controller(struct net_device *dev)
7661 {
7662         int i;
7663         struct tg3 *tp = netdev_priv(dev);
7664
7665         if (tg3_irq_sync(tp))
7666                 return;
7667
7668         for (i = 0; i < tp->irq_cnt; i++)
7669                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7670 }
7671 #endif
7672
7673 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7674 {
7675         struct tg3 *tp = netdev_priv(dev);
7676
7677         if (netif_msg_tx_err(tp)) {
7678                 netdev_err(dev, "transmit timed out, resetting\n");
7679                 tg3_dump_state(tp);
7680         }
7681
7682         tg3_reset_task_schedule(tp);
7683 }
7684
7685 /* Test for DMA buffers crossing any 4GB boundary: 4G, 8G, etc. */
7686 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7687 {
7688         u32 base = (u32) mapping & 0xffffffff;
7689
7690         return base + len + 8 < base;
7691 }
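/* Illustrative worked example (not part of the driver): the test above
 * relies on 32-bit wraparound.  A buffer is flagged exactly when the
 * low 32 bits of (base + len + 8) wrap past zero:
 *
 *	base = 0xffffff00, len = 0x200
 *	base + len + 8 = 0x00000108 (mod 2^32) < base  ->  returns true
 *
 * The +8 means buffers ending within 8 bytes of a 4GB boundary are
 * treated as crossing it, too.
 */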
7692
7693 /* Test for TSO DMA buffers that cross into regions within MSS bytes
7694  * of any 4GB boundary: 4G, 8G, etc.
7695  */
7696 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7697                                            u32 len, u32 mss)
7698 {
7699         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7700                 u32 base = (u32) mapping & 0xffffffff;
7701
7702                 return ((base + len + (mss & 0x3fff)) < base);
7703         }
7704         return 0;
7705 }
7706
7707 /* Test for DMA addresses > 40-bit */
7708 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7709                                           int len)
7710 {
7711 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7712         if (tg3_flag(tp, 40BIT_DMA_BUG))
7713                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7714         return 0;
7715 #else
7716         return 0;
7717 #endif
7718 }
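/* Illustrative worked example (not part of the driver), assuming the
 * 40BIT_DMA_BUG flag is set:
 *
 *	DMA_BIT_MASK(40) = 0xff_ffff_ffff
 *	mapping = 0xff_ffff_f000, len = 0x2000
 *	mapping + len = 0x100_0000_1000 > 0xff_ffff_ffff  ->  returns 1
 *
 * i.e. any mapping that extends past the 40-bit boundary triggers the
 * workaround path.
 */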
7719
7720 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7721                                  dma_addr_t mapping, u32 len, u32 flags,
7722                                  u32 mss, u32 vlan)
7723 {
7724         txbd->addr_hi = ((u64) mapping >> 32);
7725         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7726         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7727         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7728 }
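/* Illustrative sketch (not part of the driver): for a plain 1514-byte
 * frame with VLAN tag 5 and no TSO, the helper above packs:
 *
 *	len_flags = (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END;
 *	vlan_tag  = (0 << TXD_MSS_SHIFT) | (5 << TXD_VLAN_TAG_SHIFT);
 *
 * with the length in the upper 16 bits of len_flags (flags are masked
 * to the low 16 bits) and mss/vlan sharing the second word.
 */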
7729
7730 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7731                             dma_addr_t map, u32 len, u32 flags,
7732                             u32 mss, u32 vlan)
7733 {
7734         struct tg3 *tp = tnapi->tp;
7735         bool hwbug = false;
7736
7737         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7738                 hwbug = true;
7739
7740         if (tg3_4g_overflow_test(map, len))
7741                 hwbug = true;
7742
7743         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7744                 hwbug = true;
7745
7746         if (tg3_40bit_overflow_test(tp, map, len))
7747                 hwbug = true;
7748
7749         if (tp->dma_limit) {
7750                 u32 prvidx = *entry;
7751                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7752                 while (len > tp->dma_limit && *budget) {
7753                         u32 frag_len = tp->dma_limit;
7754                         len -= tp->dma_limit;
7755
7756                         /* Avoid the 8-byte DMA problem */
7757                         if (len <= 8) {
7758                                 len += tp->dma_limit / 2;
7759                                 frag_len = tp->dma_limit / 2;
7760                         }
7761
7762                         tnapi->tx_buffers[*entry].fragmented = true;
7763
7764                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7765                                       frag_len, tmp_flag, mss, vlan);
7766                         *budget -= 1;
7767                         prvidx = *entry;
7768                         *entry = NEXT_TX(*entry);
7769
7770                         map += frag_len;
7771                 }
7772
7773                 if (len) {
7774                         if (*budget) {
7775                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7776                                               len, flags, mss, vlan);
7777                                 *budget -= 1;
7778                                 *entry = NEXT_TX(*entry);
7779                         } else {
7780                                 hwbug = true;
7781                                 tnapi->tx_buffers[prvidx].fragmented = false;
7782                         }
7783                 }
7784         } else {
7785                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7786                               len, flags, mss, vlan);
7787                 *entry = NEXT_TX(*entry);
7788         }
7789
7790         return hwbug;
7791 }
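/* Illustrative worked example (not part of the driver): suppose
 * tp->dma_limit == 4096 and a 4100-byte mapping arrives.  A naive
 * 4096 + 4 split would leave a <= 8-byte tail that trips the short-DMA
 * erratum, so the loop above rebalances:
 *
 *	len = 4100:  frag_len = 4096, remainder = 4  (too short)
 *	             frag_len = 4096 / 2 = 2048, remainder = 4 + 2048 = 2052
 *
 * producing BDs of 2048 and 2052 bytes, each safely longer than 8.
 */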
7792
7793 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7794 {
7795         int i;
7796         struct sk_buff *skb;
7797         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7798
7799         skb = txb->skb;
7800         txb->skb = NULL;
7801
7802         dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7803                          skb_headlen(skb), DMA_TO_DEVICE);
7804
7805         while (txb->fragmented) {
7806                 txb->fragmented = false;
7807                 entry = NEXT_TX(entry);
7808                 txb = &tnapi->tx_buffers[entry];
7809         }
7810
7811         for (i = 0; i <= last; i++) {
7812                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7813
7814                 entry = NEXT_TX(entry);
7815                 txb = &tnapi->tx_buffers[entry];
7816
7817                 dma_unmap_page(&tnapi->tp->pdev->dev,
7818                                dma_unmap_addr(txb, mapping),
7819                                skb_frag_size(frag), DMA_TO_DEVICE);
7820
7821                 while (txb->fragmented) {
7822                         txb->fragmented = false;
7823                         entry = NEXT_TX(entry);
7824                         txb = &tnapi->tx_buffers[entry];
7825                 }
7826         }
7827 }
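/* Illustrative note (not from the original source): the 'fragmented'
 * markers laid down by tg3_tx_frag_set() let the unmap above walk past
 * the extra ring slots a split mapping consumed.  A head that was split
 * into three BDs is still unmapped with a single dma_unmap_single() of
 * skb_headlen() bytes; the extra slots are simply skipped by the
 * while (txb->fragmented) loops.
 */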
7828
7829 /* Work around 4GB and 40-bit hardware DMA bugs. */
7830 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7831                                        struct sk_buff **pskb,
7832                                        u32 *entry, u32 *budget,
7833                                        u32 base_flags, u32 mss, u32 vlan)
7834 {
7835         struct tg3 *tp = tnapi->tp;
7836         struct sk_buff *new_skb, *skb = *pskb;
7837         dma_addr_t new_addr = 0;
7838         int ret = 0;
7839
7840         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7841                 new_skb = skb_copy(skb, GFP_ATOMIC);
7842         else {
7843                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7844
7845                 new_skb = skb_copy_expand(skb,
7846                                           skb_headroom(skb) + more_headroom,
7847                                           skb_tailroom(skb), GFP_ATOMIC);
7848         }
7849
7850         if (!new_skb) {
7851                 ret = -1;
7852         } else {
7853                 /* New SKB is guaranteed to be linear. */
7854                 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7855                                           new_skb->len, DMA_TO_DEVICE);
7856                 /* Make sure the mapping succeeded */
7857                 if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7858                         dev_kfree_skb_any(new_skb);
7859                         ret = -1;
7860                 } else {
7861                         u32 save_entry = *entry;
7862
7863                         base_flags |= TXD_FLAG_END;
7864
7865                         tnapi->tx_buffers[*entry].skb = new_skb;
7866                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7867                                            mapping, new_addr);
7868
7869                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7870                                             new_skb->len, base_flags,
7871                                             mss, vlan)) {
7872                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7873                                 dev_kfree_skb_any(new_skb);
7874                                 ret = -1;
7875                         }
7876                 }
7877         }
7878
7879         dev_consume_skb_any(skb);
7880         *pskb = new_skb;
7881         return ret;
7882 }
7883
7884 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7885 {
7886         /* Check whether we will ever have enough descriptors;
7887          * gso_segs can exceed the current ring size.
7888          */
7889         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7890 }
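/* Illustrative note (not from the original source): the / 3 above pairs
 * with the gso_segs * 3 worst-case descriptor estimate in tg3_tso_bug()
 * below.  E.g. a TSO packet with mss 1460 and 60000 bytes of payload
 * segments into ceil(60000 / 1460) = 42 frames, budgeted at roughly 3
 * descriptors each, so it is only accepted if the ring can hold more
 * than 126 pending entries.
 */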
7891
7892 static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *);
7893
7894 /* Use GSO to work around all TSO packets that meet the HW bug conditions
7895  * indicated in tg3_tx_frag_set().
7896  */
7897 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7898                        struct netdev_queue *txq, struct sk_buff *skb)
7899 {
7900         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7901         struct sk_buff *segs, *seg, *next;
7902
7903         /* Estimate the number of fragments in the worst case */
7904         if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7905                 netif_tx_stop_queue(txq);
7906
7907                 /* netif_tx_stop_queue() must be done before checking
7908                  * the tx index in tg3_tx_avail() below, because in
7909                  * tg3_tx(), we update tx index before checking for
7910                  * netif_tx_queue_stopped().
7911                  */
7912                 smp_mb();
7913                 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7914                         return NETDEV_TX_BUSY;
7915
7916                 netif_tx_wake_queue(txq);
7917         }
7918
7919         segs = skb_gso_segment(skb, tp->dev->features &
7920                                     ~(NETIF_F_TSO | NETIF_F_TSO6));
7921         if (IS_ERR(segs) || !segs)
7922                 goto tg3_tso_bug_end;
7923
7924         skb_list_walk_safe(segs, seg, next) {
7925                 skb_mark_not_on_list(seg);
7926                 __tg3_start_xmit(seg, tp->dev);
7927         }
7928
7929 tg3_tso_bug_end:
7930         dev_consume_skb_any(skb);
7931
7932         return NETDEV_TX_OK;
7933 }
7934
7935 /* hard_start_xmit for all devices */
7936 static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7937 {
7938         struct tg3 *tp = netdev_priv(dev);
7939         u32 len, entry, base_flags, mss, vlan = 0;
7940         u32 budget;
7941         int i = -1, would_hit_hwbug;
7942         dma_addr_t mapping;
7943         struct tg3_napi *tnapi;
7944         struct netdev_queue *txq;
7945         unsigned int last;
7946         struct iphdr *iph = NULL;
7947         struct tcphdr *tcph = NULL;
7948         __sum16 tcp_csum = 0, ip_csum = 0;
7949         __be16 ip_tot_len = 0;
7950
7951         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7952         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7953         if (tg3_flag(tp, ENABLE_TSS))
7954                 tnapi++;
7955
7956         budget = tg3_tx_avail(tnapi);
7957
7958         /* We are running in BH disabled context with netif_tx_lock
7959          * and TX reclaim runs via tp->napi.poll inside of a software
7960          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7961          * no IRQ context deadlocks to worry about either.  Rejoice!
7962          */
7963         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7964                 if (!netif_tx_queue_stopped(txq)) {
7965                         netif_tx_stop_queue(txq);
7966
7967                         /* This is a hard error, log it. */
7968                         netdev_err(dev,
7969                                    "BUG! Tx Ring full when queue awake!\n");
7970                 }
7971                 return NETDEV_TX_BUSY;
7972         }
7973
7974         entry = tnapi->tx_prod;
7975         base_flags = 0;
7976
7977         mss = skb_shinfo(skb)->gso_size;
7978         if (mss) {
7979                 u32 tcp_opt_len, hdr_len;
7980
7981                 if (skb_cow_head(skb, 0))
7982                         goto drop;
7983
7984                 iph = ip_hdr(skb);
7985                 tcp_opt_len = tcp_optlen(skb);
7986
7987                 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
7988
7989                 /* HW/FW can not correctly segment packets that have been
7990                  * vlan encapsulated.
7991                  */
7992                 if (skb->protocol == htons(ETH_P_8021Q) ||
7993                     skb->protocol == htons(ETH_P_8021AD)) {
7994                         if (tg3_tso_bug_gso_check(tnapi, skb))
7995                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7996                         goto drop;
7997                 }
7998
7999                 if (!skb_is_gso_v6(skb)) {
8000                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
8001                             tg3_flag(tp, TSO_BUG)) {
8002                                 if (tg3_tso_bug_gso_check(tnapi, skb))
8003                                         return tg3_tso_bug(tp, tnapi, txq, skb);
8004                                 goto drop;
8005                         }
8006                         ip_csum = iph->check;
8007                         ip_tot_len = iph->tot_len;
8008                         iph->check = 0;
8009                         iph->tot_len = htons(mss + hdr_len);
8010                 }
8011
8012                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
8013                                TXD_FLAG_CPU_POST_DMA);
8014
8015                 tcph = tcp_hdr(skb);
8016                 tcp_csum = tcph->check;
8017
8018                 if (tg3_flag(tp, HW_TSO_1) ||
8019                     tg3_flag(tp, HW_TSO_2) ||
8020                     tg3_flag(tp, HW_TSO_3)) {
8021                         tcph->check = 0;
8022                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
8023                 } else {
8024                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
8025                                                          0, IPPROTO_TCP, 0);
8026                 }
8027
8028                 if (tg3_flag(tp, HW_TSO_3)) {
8029                         mss |= (hdr_len & 0xc) << 12;
8030                         if (hdr_len & 0x10)
8031                                 base_flags |= 0x00000010;
8032                         base_flags |= (hdr_len & 0x3e0) << 5;
8033                 } else if (tg3_flag(tp, HW_TSO_2))
8034                         mss |= hdr_len << 9;
8035                 else if (tg3_flag(tp, HW_TSO_1) ||
8036                          tg3_asic_rev(tp) == ASIC_REV_5705) {
8037                         if (tcp_opt_len || iph->ihl > 5) {
8038                                 int tsflags;
8039
8040                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8041                                 mss |= (tsflags << 11);
8042                         }
8043                 } else {
8044                         if (tcp_opt_len || iph->ihl > 5) {
8045                                 int tsflags;
8046
8047                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8048                                 base_flags |= tsflags << 12;
8049                         }
8050                 }
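                /* Illustrative worked example (not from the original
                 * source): for HW_TSO_3 the header length is smuggled
                 * into spare bits above.  With hdr_len = 40 (20-byte IP
                 * + 20-byte TCP after the MAC header):
                 *
                 *	mss        |= (40 & 0xc)   << 12;	-> 0x8000
                 *	base_flags |= (40 & 0x3e0) << 5;	-> 0x0400
                 *
                 * and bit 4 of hdr_len (clear here) would have set the
                 * 0x00000010 base flag.
                 */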
8051         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8052                 /* HW/FW can not correctly checksum packets that have been
8053                  * vlan encapsulated.
8054                  */
8055                 if (skb->protocol == htons(ETH_P_8021Q) ||
8056                     skb->protocol == htons(ETH_P_8021AD)) {
8057                         if (skb_checksum_help(skb))
8058                                 goto drop;
8059                 } else  {
8060                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8061                 }
8062         }
8063
8064         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8065             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8066                 base_flags |= TXD_FLAG_JMB_PKT;
8067
8068         if (skb_vlan_tag_present(skb)) {
8069                 base_flags |= TXD_FLAG_VLAN;
8070                 vlan = skb_vlan_tag_get(skb);
8071         }
8072
8073         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8074             tg3_flag(tp, TX_TSTAMP_EN)) {
8075                 tg3_full_lock(tp, 0);
8076                 if (!tp->pre_tx_ts) {
8077                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8078                         base_flags |= TXD_FLAG_HWTSTAMP;
8079                         tg3_read_tx_tstamp(tp, &tp->pre_tx_ts);
8080                 }
8081                 tg3_full_unlock(tp);
8082         }
8083
8084         len = skb_headlen(skb);
8085
8086         mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8087                                  DMA_TO_DEVICE);
8088         if (dma_mapping_error(&tp->pdev->dev, mapping))
8089                 goto drop;
8090
8092         tnapi->tx_buffers[entry].skb = skb;
8093         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8094
8095         would_hit_hwbug = 0;
8096
8097         if (tg3_flag(tp, 5701_DMA_BUG))
8098                 would_hit_hwbug = 1;
8099
8100         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8101                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8102                             mss, vlan)) {
8103                 would_hit_hwbug = 1;
8104         } else if (skb_shinfo(skb)->nr_frags > 0) {
8105                 u32 tmp_mss = mss;
8106
8107                 if (!tg3_flag(tp, HW_TSO_1) &&
8108                     !tg3_flag(tp, HW_TSO_2) &&
8109                     !tg3_flag(tp, HW_TSO_3))
8110                         tmp_mss = 0;
8111
8112                 /* Now loop through additional data
8113                  * fragments, and queue them.
8114                  */
8115                 last = skb_shinfo(skb)->nr_frags - 1;
8116                 for (i = 0; i <= last; i++) {
8117                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8118
8119                         len = skb_frag_size(frag);
8120                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8121                                                    len, DMA_TO_DEVICE);
8122
8123                         tnapi->tx_buffers[entry].skb = NULL;
8124                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8125                                            mapping);
8126                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8127                                 goto dma_error;
8128
8129                         if (!budget ||
8130                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8131                                             len, base_flags |
8132                                             ((i == last) ? TXD_FLAG_END : 0),
8133                                             tmp_mss, vlan)) {
8134                                 would_hit_hwbug = 1;
8135                                 break;
8136                         }
8137                 }
8138         }
8139
8140         if (would_hit_hwbug) {
8141                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8142
8143                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8144                         /* If it's a TSO packet, do GSO instead of
8145                          * allocating and copying to a large linear SKB
8146                          */
8147                         if (ip_tot_len) {
8148                                 iph->check = ip_csum;
8149                                 iph->tot_len = ip_tot_len;
8150                         }
8151                         tcph->check = tcp_csum;
8152                         return tg3_tso_bug(tp, tnapi, txq, skb);
8153                 }
8154
8155                 /* If the workaround fails due to memory/mapping
8156                  * failure, silently drop this packet.
8157                  */
8158                 entry = tnapi->tx_prod;
8159                 budget = tg3_tx_avail(tnapi);
8160                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8161                                                 base_flags, mss, vlan))
8162                         goto drop_nofree;
8163         }
8164
8165         skb_tx_timestamp(skb);
8166         netdev_tx_sent_queue(txq, skb->len);
8167
8168         /* Sync BD data before updating mailbox */
8169         wmb();
8170
8171         tnapi->tx_prod = entry;
8172         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8173                 netif_tx_stop_queue(txq);
8174
8175                 /* netif_tx_stop_queue() must be done before checking
8176                  * the tx index in tg3_tx_avail() below, because in
8177                  * tg3_tx(), we update tx index before checking for
8178                  * netif_tx_queue_stopped().
8179                  */
8180                 smp_mb();
8181                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8182                         netif_tx_wake_queue(txq);
8183         }
8184
8185         return NETDEV_TX_OK;
8186
8187 dma_error:
8188         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8189         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8190 drop:
8191         dev_kfree_skb_any(skb);
8192 drop_nofree:
8193         tp->tx_dropped++;
8194         return NETDEV_TX_OK;
8195 }
8196
8197 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8198 {
8199         struct netdev_queue *txq;
8200         u16 skb_queue_mapping;
8201         netdev_tx_t ret;
8202
8203         skb_queue_mapping = skb_get_queue_mapping(skb);
8204         txq = netdev_get_tx_queue(dev, skb_queue_mapping);
8205
8206         ret = __tg3_start_xmit(skb, dev);
8207
8208         /* Notify the hardware that packets are ready by updating the TX ring
8209          * tail pointer. We respect netdev_xmit_more(), thus avoiding poking
8210          * the hardware for every packet. To guarantee forward progress the TX
8211          * ring must be drained when it is full as indicated by
8212          * netif_xmit_stopped(). This needs to happen even when the current
8213          * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets
8214          * queued by previous __tg3_start_xmit() calls might get stuck in
8215          * the queue forever.
8216          */
8217         if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8218                 struct tg3_napi *tnapi;
8219                 struct tg3 *tp;
8220
8221                 tp = netdev_priv(dev);
8222                 tnapi = &tp->napi[skb_queue_mapping];
8223
8224                 if (tg3_flag(tp, ENABLE_TSS))
8225                         tnapi++;
8226
8227                 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
8228         }
8229
8230         return ret;
8231 }
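/* Illustrative sketch (not part of the driver) of the batching above:
 * under netdev_xmit_more() the expensive MMIO doorbell is written once
 * per burst rather than once per packet:
 *
 *	skb 1: netdev_xmit_more() true   ->  BDs queued, no mailbox write
 *	skb 2: netdev_xmit_more() true   ->  BDs queued, no mailbox write
 *	skb 3: netdev_xmit_more() false  ->  tw32_tx_mbox() once for all 3
 */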
8232
8233 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8234 {
8235         if (enable) {
8236                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8237                                   MAC_MODE_PORT_MODE_MASK);
8238
8239                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8240
8241                 if (!tg3_flag(tp, 5705_PLUS))
8242                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8243
8244                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8245                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8246                 else
8247                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8248         } else {
8249                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8250
8251                 if (tg3_flag(tp, 5705_PLUS) ||
8252                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8253                     tg3_asic_rev(tp) == ASIC_REV_5700)
8254                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8255         }
8256
8257         tw32(MAC_MODE, tp->mac_mode);
8258         udelay(40);
8259 }
8260
8261 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8262 {
8263         u32 val, bmcr, mac_mode, ptest = 0;
8264
8265         tg3_phy_toggle_apd(tp, false);
8266         tg3_phy_toggle_automdix(tp, false);
8267
8268         if (extlpbk && tg3_phy_set_extloopbk(tp))
8269                 return -EIO;
8270
8271         bmcr = BMCR_FULLDPLX;
8272         switch (speed) {
8273         case SPEED_10:
8274                 break;
8275         case SPEED_100:
8276                 bmcr |= BMCR_SPEED100;
8277                 break;
8278         case SPEED_1000:
8279         default:
8280                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8281                         speed = SPEED_100;
8282                         bmcr |= BMCR_SPEED100;
8283                 } else {
8284                         speed = SPEED_1000;
8285                         bmcr |= BMCR_SPEED1000;
8286                 }
8287         }
8288
8289         if (extlpbk) {
8290                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8291                         tg3_readphy(tp, MII_CTRL1000, &val);
8292                         val |= CTL1000_AS_MASTER |
8293                                CTL1000_ENABLE_MASTER;
8294                         tg3_writephy(tp, MII_CTRL1000, val);
8295                 } else {
8296                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8297                                 MII_TG3_FET_PTEST_TRIM_2;
8298                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8299                 }
8300         } else
8301                 bmcr |= BMCR_LOOPBACK;
8302
8303         tg3_writephy(tp, MII_BMCR, bmcr);
8304
8305         /* The write needs to be flushed for the FETs */
8306         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8307                 tg3_readphy(tp, MII_BMCR, &bmcr);
8308
8309         udelay(40);
8310
8311         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8312             tg3_asic_rev(tp) == ASIC_REV_5785) {
8313                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8314                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8315                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8316
8317                 /* The write needs to be flushed for the AC131 */
8318                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8319         }
8320
8321         /* Reset to prevent intermittently losing the first rx packet */
8322         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8323             tg3_flag(tp, 5780_CLASS)) {
8324                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8325                 udelay(10);
8326                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8327         }
8328
8329         mac_mode = tp->mac_mode &
8330                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8331         if (speed == SPEED_1000)
8332                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8333         else
8334                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8335
8336         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8337                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8338
8339                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8340                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8341                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8342                         mac_mode |= MAC_MODE_LINK_POLARITY;
8343
8344                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8345                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8346         }
8347
8348         tw32(MAC_MODE, mac_mode);
8349         udelay(40);
8350
8351         return 0;
8352 }
8353
8354 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8355 {
8356         struct tg3 *tp = netdev_priv(dev);
8357
8358         if (features & NETIF_F_LOOPBACK) {
8359                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8360                         return;
8361
8362                 spin_lock_bh(&tp->lock);
8363                 tg3_mac_loopback(tp, true);
8364                 netif_carrier_on(tp->dev);
8365                 spin_unlock_bh(&tp->lock);
8366                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8367         } else {
8368                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8369                         return;
8370
8371                 spin_lock_bh(&tp->lock);
8372                 tg3_mac_loopback(tp, false);
8373                 /* Force link status check */
8374                 tg3_setup_phy(tp, true);
8375                 spin_unlock_bh(&tp->lock);
8376                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8377         }
8378 }
8379
8380 static netdev_features_t tg3_fix_features(struct net_device *dev,
8381         netdev_features_t features)
8382 {
8383         struct tg3 *tp = netdev_priv(dev);
8384
8385         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8386                 features &= ~NETIF_F_ALL_TSO;
8387
8388         return features;
8389 }
8390
8391 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8392 {
8393         netdev_features_t changed = dev->features ^ features;
8394
8395         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8396                 tg3_set_loopback(dev, features);
8397
8398         return 0;
8399 }
8400
8401 static void tg3_rx_prodring_free(struct tg3 *tp,
8402                                  struct tg3_rx_prodring_set *tpr)
8403 {
8404         int i;
8405
8406         if (tpr != &tp->napi[0].prodring) {
8407                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8408                      i = (i + 1) & tp->rx_std_ring_mask)
8409                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8410                                         tp->rx_pkt_map_sz);
8411
8412                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8413                         for (i = tpr->rx_jmb_cons_idx;
8414                              i != tpr->rx_jmb_prod_idx;
8415                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8416                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8417                                                 TG3_RX_JMB_MAP_SZ);
8418                         }
8419                 }
8420
8421                 return;
8422         }
8423
8424         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8425                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8426                                 tp->rx_pkt_map_sz);
8427
8428         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8429                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8430                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8431                                         TG3_RX_JMB_MAP_SZ);
8432         }
8433 }
8434
8435 /* Initialize rx rings for packet processing.
8436  *
8437  * The chip has been shut down and the driver detached from
8438  * the networking core, so no interrupts or new tx packets will
8439  * end up in the driver.  tp->{tx,}lock are held and thus
8440  * we may not sleep.
8441  */
8442 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8443                                  struct tg3_rx_prodring_set *tpr)
8444 {
8445         u32 i, rx_pkt_dma_sz;
8446
8447         tpr->rx_std_cons_idx = 0;
8448         tpr->rx_std_prod_idx = 0;
8449         tpr->rx_jmb_cons_idx = 0;
8450         tpr->rx_jmb_prod_idx = 0;
8451
8452         if (tpr != &tp->napi[0].prodring) {
8453                 memset(&tpr->rx_std_buffers[0], 0,
8454                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8455                 if (tpr->rx_jmb_buffers)
8456                         memset(&tpr->rx_jmb_buffers[0], 0,
8457                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8458                 goto done;
8459         }
8460
8461         /* Zero out all descriptors. */
8462         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8463
8464         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8465         if (tg3_flag(tp, 5780_CLASS) &&
8466             tp->dev->mtu > ETH_DATA_LEN)
8467                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8468         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8469
8470         /* Initialize invariants of the rings; we only set this
8471          * stuff once.  This works because the card does not
8472          * write into the rx buffer posting rings.
8473          */
8474         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8475                 struct tg3_rx_buffer_desc *rxd;
8476
8477                 rxd = &tpr->rx_std[i];
8478                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8479                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8480                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8481                                (i << RXD_OPAQUE_INDEX_SHIFT));
8482         }
8483
8484         /* Now allocate fresh SKBs for each rx ring. */
8485         for (i = 0; i < tp->rx_pending; i++) {
8486                 unsigned int frag_size;
8487
8488                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8489                                       &frag_size) < 0) {
8490                         netdev_warn(tp->dev,
8491                                     "Using a smaller RX standard ring. Only "
8492                                     "%d out of %d buffers were allocated "
8493                                     "successfully\n", i, tp->rx_pending);
8494                         if (i == 0)
8495                                 goto initfail;
8496                         tp->rx_pending = i;
8497                         break;
8498                 }
8499         }
8500
8501         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8502                 goto done;
8503
8504         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8505
8506         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8507                 goto done;
8508
8509         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8510                 struct tg3_rx_buffer_desc *rxd;
8511
8512                 rxd = &tpr->rx_jmb[i].std;
8513                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8514                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8515                                   RXD_FLAG_JUMBO;
8516                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8517                        (i << RXD_OPAQUE_INDEX_SHIFT));
8518         }
8519
8520         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8521                 unsigned int frag_size;
8522
8523                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8524                                       &frag_size) < 0) {
8525                         netdev_warn(tp->dev,
8526                                     "Using a smaller RX jumbo ring. Only %d "
8527                                     "out of %d buffers were allocated "
8528                                     "successfully\n", i, tp->rx_jumbo_pending);
8529                         if (i == 0)
8530                                 goto initfail;
8531                         tp->rx_jumbo_pending = i;
8532                         break;
8533                 }
8534         }
8535
8536 done:
8537         return 0;
8538
8539 initfail:
8540         tg3_rx_prodring_free(tp, tpr);
8541         return -ENOMEM;
8542 }
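
/* The opaque cookie stamped into each rx descriptor above is how the
 * rx completion path finds its way back to the posting ring and buffer
 * slot.  A decode sketch (mask/shift names per tg3.h):
 *
 *      ring = desc->opaque & RXD_OPAQUE_RING_MASK;     [STD or JUMBO]
 *      idx  = (desc->opaque & RXD_OPAQUE_INDEX_MASK) >>
 *             RXD_OPAQUE_INDEX_SHIFT;                  [slot in that ring]
 */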
8543
8544 static void tg3_rx_prodring_fini(struct tg3 *tp,
8545                                  struct tg3_rx_prodring_set *tpr)
8546 {
8547         kfree(tpr->rx_std_buffers);
8548         tpr->rx_std_buffers = NULL;
8549         kfree(tpr->rx_jmb_buffers);
8550         tpr->rx_jmb_buffers = NULL;
8551         if (tpr->rx_std) {
8552                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8553                                   tpr->rx_std, tpr->rx_std_mapping);
8554                 tpr->rx_std = NULL;
8555         }
8556         if (tpr->rx_jmb) {
8557                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8558                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8559                 tpr->rx_jmb = NULL;
8560         }
8561 }
8562
8563 static int tg3_rx_prodring_init(struct tg3 *tp,
8564                                 struct tg3_rx_prodring_set *tpr)
8565 {
8566         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8567                                       GFP_KERNEL);
8568         if (!tpr->rx_std_buffers)
8569                 return -ENOMEM;
8570
8571         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8572                                          TG3_RX_STD_RING_BYTES(tp),
8573                                          &tpr->rx_std_mapping,
8574                                          GFP_KERNEL);
8575         if (!tpr->rx_std)
8576                 goto err_out;
8577
8578         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8579                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8580                                               GFP_KERNEL);
8581                 if (!tpr->rx_jmb_buffers)
8582                         goto err_out;
8583
8584                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8585                                                  TG3_RX_JMB_RING_BYTES(tp),
8586                                                  &tpr->rx_jmb_mapping,
8587                                                  GFP_KERNEL);
8588                 if (!tpr->rx_jmb)
8589                         goto err_out;
8590         }
8591
8592         return 0;
8593
8594 err_out:
8595         tg3_rx_prodring_fini(tp, tpr);
8596         return -ENOMEM;
8597 }
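
/* Note the split above: the rx_*_buffers shadow arrays live in normal
 * kernel memory (kzalloc) while the descriptor rings are DMA-coherent,
 * since the NIC reads the latter directly.  The err_out path relies on
 * tg3_rx_prodring_fini() tolerating whatever NULL members it finds
 * after a partial allocation.
 */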
8598
8599 /* Free up pending packets in all rx/tx rings.
8600  *
8601  * The chip has been shut down and the driver detached from
8602  * the networking stack, so no interrupts or new tx packets will
8603  * end up in the driver.  tp->{tx,}lock is not held and we are not
8604  * in an interrupt context and thus may sleep.
8605  */
8606 static void tg3_free_rings(struct tg3 *tp)
8607 {
8608         int i, j;
8609
8610         for (j = 0; j < tp->irq_cnt; j++) {
8611                 struct tg3_napi *tnapi = &tp->napi[j];
8612
8613                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8614
8615                 if (!tnapi->tx_buffers)
8616                         continue;
8617
8618                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8619                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8620
8621                         if (!skb)
8622                                 continue;
8623
8624                         tg3_tx_skb_unmap(tnapi, i,
8625                                          skb_shinfo(skb)->nr_frags - 1);
8626
8627                         dev_consume_skb_any(skb);
8628                 }
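                /* netdev_tx_reset_queue() also clears the queue's BQL
                 * state, so byte accounting restarts cleanly when the
                 * ring is reinitialized.
                 */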
8629                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8630         }
8631 }
8632
8633 /* Initialize tx/rx rings for packet processing.
8634  *
8635  * The chip has been shut down and the driver detached from
8636  * the networking stack, so no interrupts or new tx packets will
8637  * end up in the driver.  tp->{tx,}lock are held and thus
8638  * we may not sleep.
8639  */
8640 static int tg3_init_rings(struct tg3 *tp)
8641 {
8642         int i;
8643
8644         /* Free up all the SKBs. */
8645         tg3_free_rings(tp);
8646
8647         for (i = 0; i < tp->irq_cnt; i++) {
8648                 struct tg3_napi *tnapi = &tp->napi[i];
8649
8650                 tnapi->last_tag = 0;
8651                 tnapi->last_irq_tag = 0;
8652                 tnapi->hw_status->status = 0;
8653                 tnapi->hw_status->status_tag = 0;
8654                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8655
8656                 tnapi->tx_prod = 0;
8657                 tnapi->tx_cons = 0;
8658                 if (tnapi->tx_ring)
8659                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8660
8661                 tnapi->rx_rcb_ptr = 0;
8662                 if (tnapi->rx_rcb)
8663                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8664
8665                 if (tnapi->prodring.rx_std &&
8666                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8667                         tg3_free_rings(tp);
8668                         return -ENOMEM;
8669                 }
8670         }
8671
8672         return 0;
8673 }
8674
8675 static void tg3_mem_tx_release(struct tg3 *tp)
8676 {
8677         int i;
8678
8679         for (i = 0; i < tp->irq_max; i++) {
8680                 struct tg3_napi *tnapi = &tp->napi[i];
8681
8682                 if (tnapi->tx_ring) {
8683                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8684                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8685                         tnapi->tx_ring = NULL;
8686                 }
8687
8688                 kfree(tnapi->tx_buffers);
8689                 tnapi->tx_buffers = NULL;
8690         }
8691 }
8692
8693 static int tg3_mem_tx_acquire(struct tg3 *tp)
8694 {
8695         int i;
8696         struct tg3_napi *tnapi = &tp->napi[0];
8697
8698         /* If multivector TSS is enabled, vector 0 does not handle
8699          * tx interrupts.  Don't allocate any resources for it.
8700          */
8701         if (tg3_flag(tp, ENABLE_TSS))
8702                 tnapi++;
8703
8704         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8705                 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8706                                             sizeof(struct tg3_tx_ring_info),
8707                                             GFP_KERNEL);
8708                 if (!tnapi->tx_buffers)
8709                         goto err_out;
8710
8711                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8712                                                     TG3_TX_RING_BYTES,
8713                                                     &tnapi->tx_desc_mapping,
8714                                                     GFP_KERNEL);
8715                 if (!tnapi->tx_ring)
8716                         goto err_out;
8717         }
8718
8719         return 0;
8720
8721 err_out:
8722         tg3_mem_tx_release(tp);
8723         return -ENOMEM;
8724 }
8725
8726 static void tg3_mem_rx_release(struct tg3 *tp)
8727 {
8728         int i;
8729
8730         for (i = 0; i < tp->irq_max; i++) {
8731                 struct tg3_napi *tnapi = &tp->napi[i];
8732
8733                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8734
8735                 if (!tnapi->rx_rcb)
8736                         continue;
8737
8738                 dma_free_coherent(&tp->pdev->dev,
8739                                   TG3_RX_RCB_RING_BYTES(tp),
8740                                   tnapi->rx_rcb,
8741                                   tnapi->rx_rcb_mapping);
8742                 tnapi->rx_rcb = NULL;
8743         }
8744 }
8745
8746 static int tg3_mem_rx_acquire(struct tg3 *tp)
8747 {
8748         unsigned int i, limit;
8749
8750         limit = tp->rxq_cnt;
8751
8752         /* If RSS is enabled, we need a (dummy) producer ring set on
8753          * vector zero: the true hw prodring, where buffers are posted
8754          * while completions arrive on the per-vector return rings. */
8755         if (tg3_flag(tp, ENABLE_RSS))
8756                 limit++;
8757
8758         for (i = 0; i < limit; i++) {
8759                 struct tg3_napi *tnapi = &tp->napi[i];
8760
8761                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8762                         goto err_out;
8763
8764                 /* If multivector RSS is enabled, vector 0
8765                  * does not handle rx or tx interrupts.
8766                  * Don't allocate any resources for it.
8767                  */
8768                 if (!i && tg3_flag(tp, ENABLE_RSS))
8769                         continue;
8770
8771                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8772                                                    TG3_RX_RCB_RING_BYTES(tp),
8773                                                    &tnapi->rx_rcb_mapping,
8774                                                    GFP_KERNEL);
8775                 if (!tnapi->rx_rcb)
8776                         goto err_out;
8777         }
8778
8779         return 0;
8780
8781 err_out:
8782         tg3_mem_rx_release(tp);
8783         return -ENOMEM;
8784 }
8785
8786 /*
8787  * Must not be invoked with interrupt sources disabled and
8788  * the hardware shut down.
8789  */
8790 static void tg3_free_consistent(struct tg3 *tp)
8791 {
8792         int i;
8793
8794         for (i = 0; i < tp->irq_cnt; i++) {
8795                 struct tg3_napi *tnapi = &tp->napi[i];
8796
8797                 if (tnapi->hw_status) {
8798                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8799                                           tnapi->hw_status,
8800                                           tnapi->status_mapping);
8801                         tnapi->hw_status = NULL;
8802                 }
8803         }
8804
8805         tg3_mem_rx_release(tp);
8806         tg3_mem_tx_release(tp);
8807
8808         /* tp->hw_stats can be referenced safely:
8809          *     1. under rtnl_lock
8810          *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8811          */
8812         if (tp->hw_stats) {
8813                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8814                                   tp->hw_stats, tp->stats_mapping);
8815                 tp->hw_stats = NULL;
8816         }
8817 }
8818
8819 /*
8820  * Must not be invoked with interrupt sources disabled and
8821  * the hardware shut down.  Can sleep.
8822  */
8823 static int tg3_alloc_consistent(struct tg3 *tp)
8824 {
8825         int i;
8826
8827         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8828                                           sizeof(struct tg3_hw_stats),
8829                                           &tp->stats_mapping, GFP_KERNEL);
8830         if (!tp->hw_stats)
8831                 goto err_out;
8832
8833         for (i = 0; i < tp->irq_cnt; i++) {
8834                 struct tg3_napi *tnapi = &tp->napi[i];
8835                 struct tg3_hw_status *sblk;
8836
8837                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8838                                                       TG3_HW_STATUS_SIZE,
8839                                                       &tnapi->status_mapping,
8840                                                       GFP_KERNEL);
8841                 if (!tnapi->hw_status)
8842                         goto err_out;
8843
8844                 sblk = tnapi->hw_status;
8845
8846                 if (tg3_flag(tp, ENABLE_RSS)) {
8847                         u16 *prodptr = NULL;
8848
8849                         /*
8850                          * When RSS is enabled, the status block format changes
8851                          * slightly.  The "rx_jumbo_consumer", "reserved",
8852                          * and "rx_mini_consumer" members get mapped to the
8853                          * other three rx return ring producer indexes.
8854                          */
8855                         switch (i) {
8856                         case 1:
8857                                 prodptr = &sblk->idx[0].rx_producer;
8858                                 break;
8859                         case 2:
8860                                 prodptr = &sblk->rx_jumbo_consumer;
8861                                 break;
8862                         case 3:
8863                                 prodptr = &sblk->reserved;
8864                                 break;
8865                         case 4:
8866                                 prodptr = &sblk->rx_mini_consumer;
8867                                 break;
8868                         }
8869                         tnapi->rx_rcb_prod_idx = prodptr;
8870                 } else {
8871                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8872                 }
8873         }
8874
8875         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8876                 goto err_out;
8877
8878         return 0;
8879
8880 err_out:
8881         tg3_free_consistent(tp);
8882         return -ENOMEM;
8883 }
8884
8885 #define MAX_WAIT_CNT 1000
8886
8887 /* To stop a block, clear the enable bit and poll till it
8888  * clears.  tp->lock is held.
8889  */
8890 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8891 {
8892         unsigned int i;
8893         u32 val;
8894
8895         if (tg3_flag(tp, 5705_PLUS)) {
8896                 switch (ofs) {
8897                 case RCVLSC_MODE:
8898                 case DMAC_MODE:
8899                 case MBFREE_MODE:
8900                 case BUFMGR_MODE:
8901                 case MEMARB_MODE:
8902                         /* We can't enable/disable these bits on the
8903                          * 5705/5750; just say success.
8904                          */
8905                         return 0;
8906
8907                 default:
8908                         break;
8909                 }
8910         }
8911
8912         val = tr32(ofs);
8913         val &= ~enable_bit;
8914         tw32_f(ofs, val);
8915
8916         for (i = 0; i < MAX_WAIT_CNT; i++) {
8917                 if (pci_channel_offline(tp->pdev)) {
8918                         dev_err(&tp->pdev->dev,
8919                                 "tg3_stop_block device offline, "
8920                                 "ofs=%lx enable_bit=%x\n",
8921                                 ofs, enable_bit);
8922                         return -ENODEV;
8923                 }
8924
8925                 udelay(100);
8926                 val = tr32(ofs);
8927                 if ((val & enable_bit) == 0)
8928                         break;
8929         }
8930
8931         if (i == MAX_WAIT_CNT && !silent) {
8932                 dev_err(&tp->pdev->dev,
8933                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8934                         ofs, enable_bit);
8935                 return -ENODEV;
8936         }
8937
8938         return 0;
8939 }
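
/* With MAX_WAIT_CNT polls of 100us each, tg3_stop_block() gives a
 * block up to ~100ms to come to rest before reporting -ENODEV.
 */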
8940
8941 /* tp->lock is held. */
8942 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8943 {
8944         int i, err;
8945
8946         tg3_disable_ints(tp);
8947
8948         if (pci_channel_offline(tp->pdev)) {
8949                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8950                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8951                 err = -ENODEV;
8952                 goto err_no_dev;
8953         }
8954
8955         tp->rx_mode &= ~RX_MODE_ENABLE;
8956         tw32_f(MAC_RX_MODE, tp->rx_mode);
8957         udelay(10);
8958
8959         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8960         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8961         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8962         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8963         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8964         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8965
8966         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8967         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8968         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8969         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8970         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8971         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8972         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8973
8974         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8975         tw32_f(MAC_MODE, tp->mac_mode);
8976         udelay(40);
8977
8978         tp->tx_mode &= ~TX_MODE_ENABLE;
8979         tw32_f(MAC_TX_MODE, tp->tx_mode);
8980
8981         for (i = 0; i < MAX_WAIT_CNT; i++) {
8982                 udelay(100);
8983                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8984                         break;
8985         }
8986         if (i >= MAX_WAIT_CNT) {
8987                 dev_err(&tp->pdev->dev,
8988                         "%s timed out, TX_MODE_ENABLE will not clear "
8989                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8990                 err |= -ENODEV;
8991         }
8992
8993         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8994         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8995         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8996
8997         tw32(FTQ_RESET, 0xffffffff);
8998         tw32(FTQ_RESET, 0x00000000);
8999
9000         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
9001         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
9002
9003 err_no_dev:
9004         for (i = 0; i < tp->irq_cnt; i++) {
9005                 struct tg3_napi *tnapi = &tp->napi[i];
9006                 if (tnapi->hw_status)
9007                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9008         }
9009
9010         return err;
9011 }
9012
9013 /* Save PCI command register before chip reset */
9014 static void tg3_save_pci_state(struct tg3 *tp)
9015 {
9016         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
9017 }
9018
9019 /* Restore PCI state after chip reset */
9020 static void tg3_restore_pci_state(struct tg3 *tp)
9021 {
9022         u32 val;
9023
9024         /* Re-enable indirect register accesses. */
9025         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9026                                tp->misc_host_ctrl);
9027
9028         /* Set MAX PCI retry to zero. */
9029         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
9030         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9031             tg3_flag(tp, PCIX_MODE))
9032                 val |= PCISTATE_RETRY_SAME_DMA;
9033         /* Allow reads and writes to the APE register and memory space. */
9034         if (tg3_flag(tp, ENABLE_APE))
9035                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9036                        PCISTATE_ALLOW_APE_SHMEM_WR |
9037                        PCISTATE_ALLOW_APE_PSPACE_WR;
9038         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
9039
9040         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
9041
9042         if (!tg3_flag(tp, PCI_EXPRESS)) {
9043                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
9044                                       tp->pci_cacheline_sz);
9045                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
9046                                       tp->pci_lat_timer);
9047         }
9048
9049         /* Make sure PCI-X relaxed ordering bit is clear. */
9050         if (tg3_flag(tp, PCIX_MODE)) {
9051                 u16 pcix_cmd;
9052
9053                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9054                                      &pcix_cmd);
9055                 pcix_cmd &= ~PCI_X_CMD_ERO;
9056                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9057                                       pcix_cmd);
9058         }
9059
9060         if (tg3_flag(tp, 5780_CLASS)) {
9061
9062                 /* Chip reset on 5780 will reset MSI enable bit,
9063                  * so we need to restore it.
9064                  */
9065                 if (tg3_flag(tp, USING_MSI)) {
9066                         u16 ctrl;
9067
9068                         pci_read_config_word(tp->pdev,
9069                                              tp->msi_cap + PCI_MSI_FLAGS,
9070                                              &ctrl);
9071                         pci_write_config_word(tp->pdev,
9072                                               tp->msi_cap + PCI_MSI_FLAGS,
9073                                               ctrl | PCI_MSI_FLAGS_ENABLE);
9074                         val = tr32(MSGINT_MODE);
9075                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9076                 }
9077         }
9078 }
9079
9080 static void tg3_override_clk(struct tg3 *tp)
9081 {
9082         u32 val;
9083
9084         switch (tg3_asic_rev(tp)) {
9085         case ASIC_REV_5717:
9086                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9087                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9088                      TG3_CPMU_MAC_ORIDE_ENABLE);
9089                 break;
9090
9091         case ASIC_REV_5719:
9092         case ASIC_REV_5720:
9093                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9094                 break;
9095
9096         default:
9097                 return;
9098         }
9099 }
9100
9101 static void tg3_restore_clk(struct tg3 *tp)
9102 {
9103         u32 val;
9104
9105         switch (tg3_asic_rev(tp)) {
9106         case ASIC_REV_5717:
9107                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9108                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9109                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9110                 break;
9111
9112         case ASIC_REV_5719:
9113         case ASIC_REV_5720:
9114                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9115                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9116                 break;
9117
9118         default:
9119                 return;
9120         }
9121 }
9122
9123 /* tp->lock is held. */
9124 static int tg3_chip_reset(struct tg3 *tp)
9125         __releases(tp->lock)
9126         __acquires(tp->lock)
9127 {
9128         u32 val;
9129         void (*write_op)(struct tg3 *, u32, u32);
9130         int i, err;
9131
9132         if (!pci_device_is_present(tp->pdev))
9133                 return -ENODEV;
9134
9135         tg3_nvram_lock(tp);
9136
9137         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9138
9139         /* No matching tg3_nvram_unlock() after this because
9140          * chip reset below will undo the nvram lock.
9141          */
9142         tp->nvram_lock_cnt = 0;
9143
9144         /* GRC_MISC_CFG core clock reset will clear the memory
9145          * enable bit in PCI register 4 and the MSI enable bit
9146          * on some chips, so we save relevant registers here.
9147          */
9148         tg3_save_pci_state(tp);
9149
9150         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9151             tg3_flag(tp, 5755_PLUS))
9152                 tw32(GRC_FASTBOOT_PC, 0);
9153
9154         /*
9155          * We must avoid the readl() that normally takes place.
9156          * It locks up machines, causes machine checks, and does other
9157          * fun things.  So temporarily disable the 5701
9158          * hardware workaround while we do the reset.
9159          */
9160         write_op = tp->write32;
9161         if (write_op == tg3_write_flush_reg32)
9162                 tp->write32 = tg3_write32;
9163
9164         /* Prevent the irq handler from reading or writing PCI registers
9165          * during chip reset when the memory enable bit in the PCI command
9166          * register may be cleared.  The chip does not generate interrupts
9167          * at this time, but the irq handler may still be called due to irq
9168          * sharing or irqpoll.
9169          */
9170         tg3_flag_set(tp, CHIP_RESETTING);
9171         for (i = 0; i < tp->irq_cnt; i++) {
9172                 struct tg3_napi *tnapi = &tp->napi[i];
9173                 if (tnapi->hw_status) {
9174                         tnapi->hw_status->status = 0;
9175                         tnapi->hw_status->status_tag = 0;
9176                 }
9177                 tnapi->last_tag = 0;
9178                 tnapi->last_irq_tag = 0;
9179         }
9180         smp_mb();
9181
9182         tg3_full_unlock(tp);
9183
9184         for (i = 0; i < tp->irq_cnt; i++)
9185                 synchronize_irq(tp->napi[i].irq_vec);
9186
9187         tg3_full_lock(tp, 0);
9188
9189         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9190                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9191                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9192         }
9193
9194         /* do the reset */
9195         val = GRC_MISC_CFG_CORECLK_RESET;
9196
9197         if (tg3_flag(tp, PCI_EXPRESS)) {
9198                 /* Force PCIe 1.0a mode */
9199                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9200                     !tg3_flag(tp, 57765_PLUS) &&
9201                     tr32(TG3_PCIE_PHY_TSTCTL) ==
9202                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9203                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9204
9205                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9206                         tw32(GRC_MISC_CFG, (1 << 29));
9207                         val |= (1 << 29);
9208                 }
9209         }
9210
9211         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9212                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9213                 tw32(GRC_VCPU_EXT_CTRL,
9214                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9215         }
9216
9217         /* Set the clock to the highest frequency to avoid timeouts.  In
9218          * link-aware mode the clock can run slow and the bootcode may not
9219          * complete within the expected time.  Override the clock to let
9220          * the bootcode finish sooner, then restore it.
9221          */
9222         tg3_override_clk(tp);
9223
9224         /* Manage GPHY power for all PCIe devices lacking a CPMU. */
9225         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9226                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9227
9228         tw32(GRC_MISC_CFG, val);
9229
9230         /* restore 5701 hardware bug workaround write method */
9231         tp->write32 = write_op;
9232
9233         /* Unfortunately, we have to delay before the PCI read back.
9234          * Some 575X chips will not even respond to a PCI cfg access
9235          * when the reset command is given to the chip.
9236          *
9237          * How do these hardware designers expect things to work
9238          * properly if the PCI write is posted for a long period
9239          * of time?  It is always necessary to have some method by
9240          * which a register read back can occur to push the write
9241          * out which does the reset.
9242          *
9243          * For most tg3 variants the trick below was working.
9244          * Ho hum...
9245          */
9246         udelay(120);
9247
9248         /* Flush PCI posted writes.  The normal MMIO registers
9249          * are inaccessible at this time so this is the only
9250          * way to make this reliably (actually, this is no longer
9251          * way to do this reliably (actually, this is no longer
9252          * register read/write but this upset some 5701 variants.
9253          */
9254         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9255
9256         udelay(120);
9257
9258         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9259                 u16 val16;
9260
9261                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9262                         int j;
9263                         u32 cfg_val;
9264
9265                         /* Wait for link training to complete.  */
9266                         for (j = 0; j < 5000; j++)
9267                                 udelay(100);
9268
9269                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9270                         pci_write_config_dword(tp->pdev, 0xc4,
9271                                                cfg_val | (1 << 15));
9272                 }
9273
9274                 /* Clear the "no snoop" and "relaxed ordering" bits. */
9275                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9276                 /*
9277                  * Older PCIe devices only support the 128 byte
9278                  * MPS setting.  Enforce the restriction.
9279                  */
9280                 if (!tg3_flag(tp, CPMU_PRESENT))
9281                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9282                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9283
9284                 /* Clear error status */
9285                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9286                                       PCI_EXP_DEVSTA_CED |
9287                                       PCI_EXP_DEVSTA_NFED |
9288                                       PCI_EXP_DEVSTA_FED |
9289                                       PCI_EXP_DEVSTA_URD);
9290         }
9291
9292         tg3_restore_pci_state(tp);
9293
9294         tg3_flag_clear(tp, CHIP_RESETTING);
9295         tg3_flag_clear(tp, ERROR_PROCESSED);
9296
9297         val = 0;
9298         if (tg3_flag(tp, 5780_CLASS))
9299                 val = tr32(MEMARB_MODE);
9300         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9301
9302         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9303                 tg3_stop_fw(tp);
9304                 tw32(0x5000, 0x400);
9305         }
9306
9307         if (tg3_flag(tp, IS_SSB_CORE)) {
9308                 /*
9309                  * BCM4785: In order to avoid repercussions from using
9310                  * potentially defective internal ROM, stop the Rx RISC CPU,
9311                  * which is not needed for normal operation.
9312                  */
9313                 tg3_stop_fw(tp);
9314                 tg3_halt_cpu(tp, RX_CPU_BASE);
9315         }
9316
9317         err = tg3_poll_fw(tp);
9318         if (err)
9319                 return err;
9320
9321         tw32(GRC_MODE, tp->grc_mode);
9322
9323         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9324                 val = tr32(0xc4);
9325
9326                 tw32(0xc4, val | (1 << 15));
9327         }
9328
9329         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9330             tg3_asic_rev(tp) == ASIC_REV_5705) {
9331                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9332                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9333                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9334                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9335         }
9336
9337         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9338                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9339                 val = tp->mac_mode;
9340         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9341                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9342                 val = tp->mac_mode;
9343         } else
9344                 val = 0;
9345
9346         tw32_f(MAC_MODE, val);
9347         udelay(40);
9348
9349         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9350
9351         tg3_mdio_start(tp);
9352
9353         if (tg3_flag(tp, PCI_EXPRESS) &&
9354             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9355             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9356             !tg3_flag(tp, 57765_PLUS)) {
9357                 val = tr32(0x7c00);
9358
9359                 tw32(0x7c00, val | (1 << 25));
9360         }
9361
9362         tg3_restore_clk(tp);
9363
9364         /* Increase the core clock speed to fix the tx timeout issue on
9365          * the 5762 at 100Mbps link speed.
9366          */
9367         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9368                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9369                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9370                      TG3_CPMU_MAC_ORIDE_ENABLE);
9371         }
9372
9373         /* Reprobe ASF enable state.  */
9374         tg3_flag_clear(tp, ENABLE_ASF);
9375         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9376                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9377
9378         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9379         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9380         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9381                 u32 nic_cfg;
9382
9383                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9384                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9385                         tg3_flag_set(tp, ENABLE_ASF);
9386                         tp->last_event_jiffies = jiffies;
9387                         if (tg3_flag(tp, 5750_PLUS))
9388                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9389
9390                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9391                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9392                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9393                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9394                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9395                 }
9396         }
9397
9398         return 0;
9399 }
9400
9401 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9402 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9403 static void __tg3_set_rx_mode(struct net_device *);
9404
9405 /* tp->lock is held. */
9406 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9407 {
9408         int err;
9409
9410         tg3_stop_fw(tp);
9411
9412         tg3_write_sig_pre_reset(tp, kind);
9413
9414         tg3_abort_hw(tp, silent);
9415         err = tg3_chip_reset(tp);
9416
9417         __tg3_set_mac_addr(tp, false);
9418
9419         tg3_write_sig_legacy(tp, kind);
9420         tg3_write_sig_post_reset(tp, kind);
9421
9422         if (tp->hw_stats) {
9423                 /* Save the stats across chip resets... */
9424                 tg3_get_nstats(tp, &tp->net_stats_prev);
9425                 tg3_get_estats(tp, &tp->estats_prev);
9426
9427                 /* And make sure the next sample is new data */
9428                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9429         }
9430
9431         return err;
9432 }
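
/* The snapshots into net_stats_prev/estats_prev above keep the
 * cumulative counters seen by userspace monotonic: the chip reset
 * zeroes the hardware statistics block, and the saved totals are
 * presumably folded back in by tg3_get_nstats()/tg3_get_estats().
 */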
9433
9434 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9435 {
9436         struct tg3 *tp = netdev_priv(dev);
9437         struct sockaddr *addr = p;
9438         int err = 0;
9439         bool skip_mac_1 = false;
9440
9441         if (!is_valid_ether_addr(addr->sa_data))
9442                 return -EADDRNOTAVAIL;
9443
9444         eth_hw_addr_set(dev, addr->sa_data);
9445
9446         if (!netif_running(dev))
9447                 return 0;
9448
9449         if (tg3_flag(tp, ENABLE_ASF)) {
9450                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9451
9452                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9453                 addr0_low = tr32(MAC_ADDR_0_LOW);
9454                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9455                 addr1_low = tr32(MAC_ADDR_1_LOW);
9456
9457                 /* Skip MAC addr 1 if ASF is using it. */
9458                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9459                     !(addr1_high == 0 && addr1_low == 0))
9460                         skip_mac_1 = true;
9461         }
9462         spin_lock_bh(&tp->lock);
9463         __tg3_set_mac_addr(tp, skip_mac_1);
9464         __tg3_set_rx_mode(dev);
9465         spin_unlock_bh(&tp->lock);
9466
9467         return err;
9468 }
9469
9470 /* tp->lock is held. */
9471 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9472                            dma_addr_t mapping, u32 maxlen_flags,
9473                            u32 nic_addr)
9474 {
9475         tg3_write_mem(tp,
9476                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9477                       ((u64) mapping >> 32));
9478         tg3_write_mem(tp,
9479                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9480                       ((u64) mapping & 0xffffffff));
9481         tg3_write_mem(tp,
9482                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9483                        maxlen_flags);
9484
9485         if (!tg3_flag(tp, 5705_PLUS))
9486                 tg3_write_mem(tp,
9487                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9488                               nic_addr);
9489 }
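
/* Each ring control block ("bdinfo") in NIC SRAM is four 32-bit words;
 * per the tg3.h offsets used above: +0x0/+0x4 host ring address
 * (high/low), +0x8 maxlen/flags, +0xc NIC-side ring address (only
 * meaningful on pre-5705 parts, hence the flag check).
 */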
9490
9491
9492 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9493 {
9494         int i = 0;
9495
9496         if (!tg3_flag(tp, ENABLE_TSS)) {
9497                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9498                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9499                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9500         } else {
9501                 tw32(HOSTCC_TXCOL_TICKS, 0);
9502                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9503                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9504
9505                 for (; i < tp->txq_cnt; i++) {
9506                         u32 reg;
9507
9508                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9509                         tw32(reg, ec->tx_coalesce_usecs);
9510                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9511                         tw32(reg, ec->tx_max_coalesced_frames);
9512                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9513                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9514                 }
9515         }
9516
9517         for (; i < tp->irq_max - 1; i++) {
9518                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9519                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9520                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9521         }
9522 }
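
/* The per-vector coalescing registers sit at a fixed 0x18-byte stride
 * from the *_VEC1 addresses; any vectors past txq_cnt are zeroed above
 * so unused slots never generate events.
 */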
9523
9524 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9525 {
9526         int i = 0;
9527         u32 limit = tp->rxq_cnt;
9528
9529         if (!tg3_flag(tp, ENABLE_RSS)) {
9530                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9531                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9532                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9533                 limit--;
9534         } else {
9535                 tw32(HOSTCC_RXCOL_TICKS, 0);
9536                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9537                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9538         }
9539
9540         for (; i < limit; i++) {
9541                 u32 reg;
9542
9543                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9544                 tw32(reg, ec->rx_coalesce_usecs);
9545                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9546                 tw32(reg, ec->rx_max_coalesced_frames);
9547                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9548                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9549         }
9550
9551         for (; i < tp->irq_max - 1; i++) {
9552                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9553                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9554                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9555         }
9556 }
9557
9558 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9559 {
9560         tg3_coal_tx_init(tp, ec);
9561         tg3_coal_rx_init(tp, ec);
9562
9563         if (!tg3_flag(tp, 5705_PLUS)) {
9564                 u32 val = ec->stats_block_coalesce_usecs;
9565
9566                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9567                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9568
9569                 if (!tp->link_up)
9570                         val = 0;
9571
9572                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9573         }
9574 }
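
/* HOSTCC_STAT_COAL_TICKS is forced to zero while the link is down,
 * presumably so the coalescing engine stops its periodic statistics
 * block DMA when there is nothing to count.
 */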
9575
9576 /* tp->lock is held. */
9577 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9578 {
9579         u32 txrcb, limit;
9580
9581         /* Disable all transmit rings but the first. */
9582         if (!tg3_flag(tp, 5705_PLUS))
9583                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9584         else if (tg3_flag(tp, 5717_PLUS))
9585                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9586         else if (tg3_flag(tp, 57765_CLASS) ||
9587                  tg3_asic_rev(tp) == ASIC_REV_5762)
9588                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9589         else
9590                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9591
9592         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9593              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9594                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9595                               BDINFO_FLAGS_DISABLED);
9596 }
9597
9598 /* tp->lock is held. */
9599 static void tg3_tx_rcbs_init(struct tg3 *tp)
9600 {
9601         int i = 0;
9602         u32 txrcb = NIC_SRAM_SEND_RCB;
9603
9604         if (tg3_flag(tp, ENABLE_TSS))
9605                 i++;
9606
9607         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9608                 struct tg3_napi *tnapi = &tp->napi[i];
9609
9610                 if (!tnapi->tx_ring)
9611                         continue;
9612
9613                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9614                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9615                                NIC_SRAM_TX_BUFFER_DESC);
9616         }
9617 }
9618
9619 /* tp->lock is held. */
9620 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9621 {
9622         u32 rxrcb, limit;
9623
9624         /* Disable all receive return rings but the first. */
9625         if (tg3_flag(tp, 5717_PLUS))
9626                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9627         else if (!tg3_flag(tp, 5705_PLUS))
9628                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9629         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9630                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9631                  tg3_flag(tp, 57765_CLASS))
9632                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9633         else
9634                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9635
9636         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9637              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9638                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9639                               BDINFO_FLAGS_DISABLED);
9640 }
9641
9642 /* tp->lock is held. */
9643 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9644 {
9645         int i = 0;
9646         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9647
9648         if (tg3_flag(tp, ENABLE_RSS))
9649                 i++;
9650
9651         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9652                 struct tg3_napi *tnapi = &tp->napi[i];
9653
9654                 if (!tnapi->rx_rcb)
9655                         continue;
9656
9657                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9658                                (tp->rx_ret_ring_mask + 1) <<
9659                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9660         }
9661 }
9662
9663 /* tp->lock is held. */
9664 static void tg3_rings_reset(struct tg3 *tp)
9665 {
9666         int i;
9667         u32 stblk;
9668         struct tg3_napi *tnapi = &tp->napi[0];
9669
9670         tg3_tx_rcbs_disable(tp);
9671
9672         tg3_rx_ret_rcbs_disable(tp);
9673
9674         /* Disable interrupts */
9675         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9676         tp->napi[0].chk_msi_cnt = 0;
9677         tp->napi[0].last_rx_cons = 0;
9678         tp->napi[0].last_tx_cons = 0;
9679
9680         /* Zero mailbox registers. */
9681         if (tg3_flag(tp, SUPPORT_MSIX)) {
9682                 for (i = 1; i < tp->irq_max; i++) {
9683                         tp->napi[i].tx_prod = 0;
9684                         tp->napi[i].tx_cons = 0;
9685                         if (tg3_flag(tp, ENABLE_TSS))
9686                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9687                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9688                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9689                         tp->napi[i].chk_msi_cnt = 0;
9690                         tp->napi[i].last_rx_cons = 0;
9691                         tp->napi[i].last_tx_cons = 0;
9692                 }
9693                 if (!tg3_flag(tp, ENABLE_TSS))
9694                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9695         } else {
9696                 tp->napi[0].tx_prod = 0;
9697                 tp->napi[0].tx_cons = 0;
9698                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9699                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9700         }
9701
9702         /* Make sure the NIC-based send BD rings are disabled. */
9703         if (!tg3_flag(tp, 5705_PLUS)) {
9704                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9705                 for (i = 0; i < 16; i++)
9706                         tw32_tx_mbox(mbox + i * 8, 0);
9707         }
9708
9709         /* Clear status block in ram. */
9710         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9711
9712         /* Set status block DMA address */
9713         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9714              ((u64) tnapi->status_mapping >> 32));
9715         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9716              ((u64) tnapi->status_mapping & 0xffffffff));
9717
9718         stblk = HOSTCC_STATBLCK_RING1;
9719
9720         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9721                 u64 mapping = (u64)tnapi->status_mapping;
9722                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9723                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9724                 stblk += 8;
9725
9726                 /* Clear status block in ram. */
9727                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9728         }
9729
9730         tg3_tx_rcbs_init(tp);
9731         tg3_rx_ret_rcbs_init(tp);
9732 }
9733
9734 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9735 {
9736         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9737
9738         if (!tg3_flag(tp, 5750_PLUS) ||
9739             tg3_flag(tp, 5780_CLASS) ||
9740             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9741             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9742             tg3_flag(tp, 57765_PLUS))
9743                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9744         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9745                  tg3_asic_rev(tp) == ASIC_REV_5787)
9746                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9747         else
9748                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9749
9750         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9751         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9752
9753         val = min(nic_rep_thresh, host_rep_thresh);
9754         tw32(RCVBDI_STD_THRESH, val);
9755
9756         if (tg3_flag(tp, 57765_PLUS))
9757                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9758
9759         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9760                 return;
9761
9762         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9763
9764         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9765
9766         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9767         tw32(RCVBDI_JUMBO_THRESH, val);
9768
9769         if (tg3_flag(tp, 57765_PLUS))
9770                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9771 }
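
/* Worked example: with the driver default of tp->rx_pending == 200,
 * host_rep_thresh above is max(200 / 8, 1) == 25, so the NIC refetches
 * standard ring BDs once at least 25 entries are consumed (unless the
 * per-chip bdcache limit is smaller).
 */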
9772
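/* Bit-at-a-time little-endian CRC-32 over buf.  The result is
 * equivalent to ~crc32_le(~0, buf, len) from <linux/crc32.h>; it is
 * open-coded around CRC32_POLY_LE here, presumably to avoid a hard
 * dependency on the crc32 library.
 */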
9773 static inline u32 calc_crc(unsigned char *buf, int len)
9774 {
9775         u32 reg;
9776         u32 tmp;
9777         int j, k;
9778
9779         reg = 0xffffffff;
9780
9781         for (j = 0; j < len; j++) {
9782                 reg ^= buf[j];
9783
9784                 for (k = 0; k < 8; k++) {
9785                         tmp = reg & 0x01;
9786
9787                         reg >>= 1;
9788
9789                         if (tmp)
9790                                 reg ^= CRC32_POLY_LE;
9791                 }
9792         }
9793
9794         return ~reg;
9795 }
9796
9797 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9798 {
9799         /* accept or reject all multicast frames */
9800         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9801         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9802         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9803         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9804 }
9805
9806 static void __tg3_set_rx_mode(struct net_device *dev)
9807 {
9808         struct tg3 *tp = netdev_priv(dev);
9809         u32 rx_mode;
9810
9811         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9812                                   RX_MODE_KEEP_VLAN_TAG);
9813
9814 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9815         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9816          * flag clear.
9817          */
9818         if (!tg3_flag(tp, ENABLE_ASF))
9819                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9820 #endif
9821
9822         if (dev->flags & IFF_PROMISC) {
9823                 /* Promiscuous mode. */
9824                 rx_mode |= RX_MODE_PROMISC;
9825         } else if (dev->flags & IFF_ALLMULTI) {
9826                 /* Accept all multicast. */
9827                 tg3_set_multi(tp, 1);
9828         } else if (netdev_mc_empty(dev)) {
9829                 /* Reject all multicast. */
9830                 tg3_set_multi(tp, 0);
9831         } else {
9832                 /* Accept one or more multicast(s). */
9833                 struct netdev_hw_addr *ha;
9834                 u32 mc_filter[4] = { 0, };
9835                 u32 regidx;
9836                 u32 bit;
9837                 u32 crc;
9838
9839                 netdev_for_each_mc_addr(ha, dev) {
9840                         crc = calc_crc(ha->addr, ETH_ALEN);
9841                         bit = ~crc & 0x7f;
9842                         regidx = (bit & 0x60) >> 5;
9843                         bit &= 0x1f;
9844                         mc_filter[regidx] |= (1 << bit);
9845                 }
9846
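                /* Example: a hash whose low bits give ~crc & 0x7f == 0x47
                 * lands in mc_filter[(0x47 & 0x60) >> 5] == mc_filter[2],
                 * bit 0x47 & 0x1f == 7, i.e. bit 7 of MAC_HASH_REG_2.
                 */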
9847                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9848                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9849                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9850                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9851         }
9852
9853         if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9854                 rx_mode |= RX_MODE_PROMISC;
9855         } else if (!(dev->flags & IFF_PROMISC)) {
9856                 /* Add all entries to the MAC addr filter list */
9857                 int i = 0;
9858                 struct netdev_hw_addr *ha;
9859
9860                 netdev_for_each_uc_addr(ha, dev) {
9861                         __tg3_set_one_mac_addr(tp, ha->addr,
9862                                                i + TG3_UCAST_ADDR_IDX(tp));
9863                         i++;
9864                 }
9865         }
9866
9867         if (rx_mode != tp->rx_mode) {
9868                 tp->rx_mode = rx_mode;
9869                 tw32_f(MAC_RX_MODE, rx_mode);
9870                 udelay(10);
9871         }
9872 }
9873
9874 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9875 {
9876         int i;
9877
9878         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9879                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9880 }
9881
9882 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9883 {
9884         int i;
9885
9886         if (!tg3_flag(tp, SUPPORT_MSIX))
9887                 return;
9888
9889         if (tp->rxq_cnt == 1) {
9890                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9891                 return;
9892         }
9893
9894         /* Validate table against current IRQ count */
9895         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9896                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9897                         break;
9898         }
9899
9900         if (i != TG3_RSS_INDIR_TBL_SIZE)
9901                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9902 }
9903
9904 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9905 {
9906         int i = 0;
9907         u32 reg = MAC_RSS_INDIR_TBL_0;
9908
9909         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9910                 u32 val = tp->rss_ind_tbl[i];
9911                 i++;
9912                 for (; i % 8; i++) {
9913                         val <<= 4;
9914                         val |= tp->rss_ind_tbl[i];
9915                 }
9916                 tw32(reg, val);
9917                 reg += 4;
9918         }
9919 }
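
/* Each 32-bit MAC_RSS_INDIR_TBL_* register packs eight 4-bit table
 * entries, first entry in the most significant nibble.  For example,
 * rss_ind_tbl[0..7] == {1, 2, 3, 0, 1, 2, 3, 0} is written as
 * 0x12301230.
 */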
9920
9921 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9922 {
9923         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9924                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9925         else
9926                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9927 }
9928
9929 /* tp->lock is held. */
9930 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9931 {
9932         u32 val, rdmac_mode;
9933         int i, err, limit;
9934         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9935
9936         tg3_disable_ints(tp);
9937
9938         tg3_stop_fw(tp);
9939
9940         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9941
9942         if (tg3_flag(tp, INIT_COMPLETE))
9943                 tg3_abort_hw(tp, 1);
9944
9945         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9946             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9947                 tg3_phy_pull_config(tp);
9948                 tg3_eee_pull_config(tp, NULL);
9949                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9950         }
9951
9952         /* Enable MAC control of LPI */
9953         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9954                 tg3_setup_eee(tp);
9955
9956         if (reset_phy)
9957                 tg3_phy_reset(tp);
9958
9959         err = tg3_chip_reset(tp);
9960         if (err)
9961                 return err;
9962
9963         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9964
9965         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9966                 val = tr32(TG3_CPMU_CTRL);
9967                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9968                 tw32(TG3_CPMU_CTRL, val);
9969
9970                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9971                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9972                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9973                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9974
9975                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9976                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9977                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9978                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9979
9980                 val = tr32(TG3_CPMU_HST_ACC);
9981                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9982                 val |= CPMU_HST_ACC_MACCLK_6_25;
9983                 tw32(TG3_CPMU_HST_ACC, val);
9984         }
9985
9986         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9987                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9988                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9989                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9990                 tw32(PCIE_PWR_MGMT_THRESH, val);
9991
9992                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9993                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9994
9995                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9996
9997                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9998                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9999         }
10000
10001         if (tg3_flag(tp, L1PLLPD_EN)) {
10002                 u32 grc_mode = tr32(GRC_MODE);
10003
10004                 /* Access the lower 1K of PL PCIE block registers. */
10005                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10006                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10007
10008                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
10009                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
10010                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
10011
10012                 tw32(GRC_MODE, grc_mode);
10013         }
10014
10015         if (tg3_flag(tp, 57765_CLASS)) {
10016                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
10017                         u32 grc_mode = tr32(GRC_MODE);
10018
10019                         /* Access the lower 1K of PL PCIE block registers. */
10020                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10021                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10022
10023                         val = tr32(TG3_PCIE_TLDLPL_PORT +
10024                                    TG3_PCIE_PL_LO_PHYCTL5);
10025                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
10026                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
10027
10028                         tw32(GRC_MODE, grc_mode);
10029                 }
10030
10031                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
10032                         u32 grc_mode;
10033
10034                         /* Fix transmit hangs */
10035                         val = tr32(TG3_CPMU_PADRNG_CTL);
10036                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
10037                         tw32(TG3_CPMU_PADRNG_CTL, val);
10038
10039                         grc_mode = tr32(GRC_MODE);
10040
10041                         /* Access the lower 1K of DL PCIE block registers. */
10042                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10043                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
10044
10045                         val = tr32(TG3_PCIE_TLDLPL_PORT +
10046                                    TG3_PCIE_DL_LO_FTSMAX);
10047                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
10048                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
10049                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
10050
10051                         tw32(GRC_MODE, grc_mode);
10052                 }
10053
10054                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
10055                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
10056                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
10057                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10058         }
10059
10060         /* This works around an issue with Athlon chipsets on
10061          * B3 tigon3 silicon.  This bit has no effect on any
10062          * other revision.  But do not set this on PCI Express
10063          * chips and don't even touch the clocks if the CPMU is present.
10064          */
10065         if (!tg3_flag(tp, CPMU_PRESENT)) {
10066                 if (!tg3_flag(tp, PCI_EXPRESS))
10067                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10068                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10069         }
10070
10071         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10072             tg3_flag(tp, PCIX_MODE)) {
10073                 val = tr32(TG3PCI_PCISTATE);
10074                 val |= PCISTATE_RETRY_SAME_DMA;
10075                 tw32(TG3PCI_PCISTATE, val);
10076         }
10077
10078         if (tg3_flag(tp, ENABLE_APE)) {
10079                 /* Allow reads and writes to the
10080                  * APE register and memory space.
10081                  */
10082                 val = tr32(TG3PCI_PCISTATE);
10083                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10084                        PCISTATE_ALLOW_APE_SHMEM_WR |
10085                        PCISTATE_ALLOW_APE_PSPACE_WR;
10086                 tw32(TG3PCI_PCISTATE, val);
10087         }
10088
10089         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10090                 /* Enable some hw fixes.  */
10091                 val = tr32(TG3PCI_MSI_DATA);
10092                 val |= (1 << 26) | (1 << 28) | (1 << 29);
10093                 tw32(TG3PCI_MSI_DATA, val);
10094         }
10095
10096         /* Descriptor ring init may make accesses to the
10097          * NIC SRAM area to set up the TX descriptors, so we
10098          * can only do this after the hardware has been
10099          * successfully reset.
10100          */
10101         err = tg3_init_rings(tp);
10102         if (err)
10103                 return err;
10104
10105         if (tg3_flag(tp, 57765_PLUS)) {
10106                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10107                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10108                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10109                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10110                 if (!tg3_flag(tp, 57765_CLASS) &&
10111                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10112                     tg3_asic_rev(tp) != ASIC_REV_5762)
10113                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10114                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10115         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10116                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10117                 /* This value is determined during the probe-time DMA
10118                  * engine test, tg3_test_dma.
10119                  */
10120                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10121         }
10122
10123         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10124                           GRC_MODE_4X_NIC_SEND_RINGS |
10125                           GRC_MODE_NO_TX_PHDR_CSUM |
10126                           GRC_MODE_NO_RX_PHDR_CSUM);
10127         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10128
10129         /* Pseudo-header checksum is done by hardware logic and not
10130          * the offload processors, so make the chip do the pseudo-
10131          * header checksums on receive.  For transmit it is more
10132          * convenient to do the pseudo-header checksum in software
10133          * as Linux does that on transmit for us in all cases.
10134          */
10135         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10136
10137         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10138         if (tp->rxptpctl)
10139                 tw32(TG3_RX_PTP_CTL,
10140                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10141
10142         if (tg3_flag(tp, PTP_CAPABLE))
10143                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10144
10145         tw32(GRC_MODE, tp->grc_mode | val);
10146
10147         /* On some AMD platforms, MRRS is restricted to 4000 because of a
10148          * south bridge limitation. As a workaround, the driver sets MRRS
10149          * to 2048 instead of the default 4096.
10150          */
10151         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10152             tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10153                 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10154                 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10155         }
10156
10157         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
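        /* A prescaler value of 65 divides the clock by 66, presumably
         * yielding 1 usec timer ticks.
         */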
10158         val = tr32(GRC_MISC_CFG);
10159         val &= ~0xff;
10160         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10161         tw32(GRC_MISC_CFG, val);
10162
10163         /* Initialize MBUF/DESC pool. */
10164         if (tg3_flag(tp, 5750_PLUS)) {
10165                 /* Do nothing.  */
10166         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10167                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10168                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10169                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10170                 else
10171                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10172                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10173                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10174         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10175                 int fw_len;
10176
10177                 fw_len = tp->fw_len;
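                /* Round the firmware length up to the next 128-byte boundary. */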
10178                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10179                 tw32(BUFMGR_MB_POOL_ADDR,
10180                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10181                 tw32(BUFMGR_MB_POOL_SIZE,
10182                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10183         }
10184
10185         if (tp->dev->mtu <= ETH_DATA_LEN) {
10186                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10187                      tp->bufmgr_config.mbuf_read_dma_low_water);
10188                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10189                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10190                 tw32(BUFMGR_MB_HIGH_WATER,
10191                      tp->bufmgr_config.mbuf_high_water);
10192         } else {
10193                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10194                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10195                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10196                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10197                 tw32(BUFMGR_MB_HIGH_WATER,
10198                      tp->bufmgr_config.mbuf_high_water_jumbo);
10199         }
10200         tw32(BUFMGR_DMA_LOW_WATER,
10201              tp->bufmgr_config.dma_low_water);
10202         tw32(BUFMGR_DMA_HIGH_WATER,
10203              tp->bufmgr_config.dma_high_water);
10204
10205         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10206         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10207                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10208         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10209             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10210             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10211             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10212                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10213         tw32(BUFMGR_MODE, val);
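        /* Poll up to ~20 ms (2000 iterations x 10 us) for the enable to latch. */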
10214         for (i = 0; i < 2000; i++) {
10215                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10216                         break;
10217                 udelay(10);
10218         }
10219         if (i >= 2000) {
10220                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10221                 return -ENODEV;
10222         }
10223
10224         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10225                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10226
10227         tg3_setup_rxbd_thresholds(tp);
10228
10229         /* Initialize TG3_BDINFO's at:
10230          *  RCVDBDI_STD_BD:     standard eth size rx ring
10231          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10232          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10233          *
10234          * like so:
10235          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10236          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10237          *                              ring attribute flags
10238          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10239          *
10240          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10241          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10242          *
10243          * The size of each ring is fixed in the firmware, but the location is
10244          * configurable.
10245          */
10246         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10247              ((u64) tpr->rx_std_mapping >> 32));
10248         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10249              ((u64) tpr->rx_std_mapping & 0xffffffff));
10250         if (!tg3_flag(tp, 5717_PLUS))
10251                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10252                      NIC_SRAM_RX_BUFFER_DESC);
10253
10254         /* Disable the mini ring */
10255         if (!tg3_flag(tp, 5705_PLUS))
10256                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10257                      BDINFO_FLAGS_DISABLED);
10258
10259         /* Program the jumbo buffer descriptor ring control
10260          * blocks on those devices that have them.
10261          */
10262         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10263             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10264
10265                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10266                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10267                              ((u64) tpr->rx_jmb_mapping >> 32));
10268                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10269                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10270                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10271                               BDINFO_FLAGS_MAXLEN_SHIFT;
10272                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10273                              val | BDINFO_FLAGS_USE_EXT_RECV);
10274                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10275                             tg3_flag(tp, 57765_CLASS) ||
10276                             tg3_asic_rev(tp) == ASIC_REV_5762)
10277                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10278                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10279                 } else {
10280                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10281                              BDINFO_FLAGS_DISABLED);
10282                 }
10283
10284                 if (tg3_flag(tp, 57765_PLUS)) {
10285                         val = TG3_RX_STD_RING_SIZE(tp);
10286                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10287                         val |= (TG3_RX_STD_DMA_SZ << 2);
10288                 } else
10289                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10290         } else
10291                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10292
10293         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10294
10295         tpr->rx_std_prod_idx = tp->rx_pending;
10296         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10297
10298         tpr->rx_jmb_prod_idx =
10299                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10300         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10301
10302         tg3_rings_reset(tp);
10303
10304         /* Initialize MAC address and backoff seed. */
10305         __tg3_set_mac_addr(tp, false);
10306
10307         /* MTU + ethernet header + FCS + optional VLAN tag */
10308         tw32(MAC_RX_MTU_SIZE,
10309              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10310
10311         /* The slot time is changed by tg3_setup_phy if we
10312          * run at gigabit with half duplex.
10313          */
10314         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10315               (6 << TX_LENGTHS_IPG_SHIFT) |
10316               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10317
10318         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10319             tg3_asic_rev(tp) == ASIC_REV_5762)
10320                 val |= tr32(MAC_TX_LENGTHS) &
10321                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10322                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10323
10324         tw32(MAC_TX_LENGTHS, val);
10325
10326         /* Receive rules. */
10327         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10328         tw32(RCVLPC_CONFIG, 0x0181);
10329
10330         /* Calculate the RDMAC_MODE setting early; we need it to determine
10331          * the RCVLPC_STATE_ENABLE mask.
10332          */
10333         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10334                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10335                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10336                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10337                       RDMAC_MODE_LNGREAD_ENAB);
10338
10339         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10340                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10341
10342         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10343             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10344             tg3_asic_rev(tp) == ASIC_REV_57780)
10345                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10346                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10347                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10348
10349         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10350             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10351                 if (tg3_flag(tp, TSO_CAPABLE)) {
10352                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10353                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10354                            !tg3_flag(tp, IS_5788)) {
10355                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10356                 }
10357         }
10358
10359         if (tg3_flag(tp, PCI_EXPRESS))
10360                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10361
10362         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10363                 tp->dma_limit = 0;
10364                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10365                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10366                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10367                 }
10368         }
10369
10370         if (tg3_flag(tp, HW_TSO_1) ||
10371             tg3_flag(tp, HW_TSO_2) ||
10372             tg3_flag(tp, HW_TSO_3))
10373                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10374
10375         if (tg3_flag(tp, 57765_PLUS) ||
10376             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10377             tg3_asic_rev(tp) == ASIC_REV_57780)
10378                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10379
10380         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10381             tg3_asic_rev(tp) == ASIC_REV_5762)
10382                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10383
10384         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10385             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10386             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10387             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10388             tg3_flag(tp, 57765_PLUS)) {
10389                 u32 tgtreg;
10390
10391                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10392                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10393                 else
10394                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10395
10396                 val = tr32(tgtreg);
10397                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10398                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10399                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10400                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10401                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10402                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10403                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10404                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10405                 }
10406                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10407         }
10408
10409         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10410             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10411             tg3_asic_rev(tp) == ASIC_REV_5762) {
10412                 u32 tgtreg;
10413
10414                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10415                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10416                 else
10417                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10418
10419                 val = tr32(tgtreg);
10420                 tw32(tgtreg, val |
10421                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10422                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10423         }
10424
10425         /* Receive/send statistics. */
10426         if (tg3_flag(tp, 5750_PLUS)) {
10427                 val = tr32(RCVLPC_STATS_ENABLE);
10428                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10429                 tw32(RCVLPC_STATS_ENABLE, val);
10430         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10431                    tg3_flag(tp, TSO_CAPABLE)) {
10432                 val = tr32(RCVLPC_STATS_ENABLE);
10433                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10434                 tw32(RCVLPC_STATS_ENABLE, val);
10435         } else {
10436                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10437         }
10438         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10439         tw32(SNDDATAI_STATSENAB, 0xffffff);
10440         tw32(SNDDATAI_STATSCTRL,
10441              (SNDDATAI_SCTRL_ENABLE |
10442               SNDDATAI_SCTRL_FASTUPD));
10443
10444         /* Setup host coalescing engine. */
10445         tw32(HOSTCC_MODE, 0);
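        /* Wait up to ~20 ms for the coalescing engine to report itself idle. */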
10446         for (i = 0; i < 2000; i++) {
10447                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10448                         break;
10449                 udelay(10);
10450         }
10451
10452         __tg3_set_coalesce(tp, &tp->coal);
10453
10454         if (!tg3_flag(tp, 5705_PLUS)) {
10455                 /* Status/statistics block address.  See tg3_timer,
10456                  * the tg3_periodic_fetch_stats call there, and
10457                  * tg3_get_stats to see how this works for 5705/5750 chips.
10458                  */
10459                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10460                      ((u64) tp->stats_mapping >> 32));
10461                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10462                      ((u64) tp->stats_mapping & 0xffffffff));
10463                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10464
10465                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10466
10467                 /* Clear statistics and status block memory areas */
10468                 for (i = NIC_SRAM_STATS_BLK;
10469                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10470                      i += sizeof(u32)) {
10471                         tg3_write_mem(tp, i, 0);
10472                         udelay(40);
10473                 }
10474         }
10475
10476         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10477
10478         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10479         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10480         if (!tg3_flag(tp, 5705_PLUS))
10481                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10482
10483         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10484                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10485                 /* reset to prevent losing 1st rx packet intermittently */
10486                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10487                 udelay(10);
10488         }
10489
10490         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10491                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10492                         MAC_MODE_FHDE_ENABLE;
10493         if (tg3_flag(tp, ENABLE_APE))
10494                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10495         if (!tg3_flag(tp, 5705_PLUS) &&
10496             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10497             tg3_asic_rev(tp) != ASIC_REV_5700)
10498                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10499         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10500         udelay(40);
10501
10502         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10503          * If TG3_FLAG_IS_NIC is zero, we should read the
10504          * register to preserve the GPIO settings for LOMs. The GPIOs,
10505          * whether used as inputs or outputs, are set by boot code after
10506          * reset.
10507          */
10508         if (!tg3_flag(tp, IS_NIC)) {
10509                 u32 gpio_mask;
10510
10511                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10512                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10513                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10514
10515                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10516                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10517                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10518
10519                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10520                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10521
10522                 tp->grc_local_ctrl &= ~gpio_mask;
10523                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10524
10525                 /* GPIO1 must be driven high for eeprom write protect */
10526                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10527                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10528                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10529         }
10530         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10531         udelay(100);
10532
10533         if (tg3_flag(tp, USING_MSIX)) {
10534                 val = tr32(MSGINT_MODE);
10535                 val |= MSGINT_MODE_ENABLE;
10536                 if (tp->irq_cnt > 1)
10537                         val |= MSGINT_MODE_MULTIVEC_EN;
10538                 if (!tg3_flag(tp, 1SHOT_MSI))
10539                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10540                 tw32(MSGINT_MODE, val);
10541         }
10542
10543         if (!tg3_flag(tp, 5705_PLUS)) {
10544                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10545                 udelay(40);
10546         }
10547
10548         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10549                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10550                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10551                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10552                WDMAC_MODE_LNGREAD_ENAB);
10553
10554         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10555             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10556                 if (tg3_flag(tp, TSO_CAPABLE) &&
10557                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10558                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10559                         /* nothing */
10560                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10561                            !tg3_flag(tp, IS_5788)) {
10562                         val |= WDMAC_MODE_RX_ACCEL;
10563                 }
10564         }
10565
10566         /* Enable host coalescing bug fix */
10567         if (tg3_flag(tp, 5755_PLUS))
10568                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10569
10570         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10571                 val |= WDMAC_MODE_BURST_ALL_DATA;
10572
10573         tw32_f(WDMAC_MODE, val);
10574         udelay(40);
10575
10576         if (tg3_flag(tp, PCIX_MODE)) {
10577                 u16 pcix_cmd;
10578
10579                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10580                                      &pcix_cmd);
10581                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10582                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10583                         pcix_cmd |= PCI_X_CMD_READ_2K;
10584                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10585                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10586                         pcix_cmd |= PCI_X_CMD_READ_2K;
10587                 }
10588                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10589                                       pcix_cmd);
10590         }
10591
10592         tw32_f(RDMAC_MODE, rdmac_mode);
10593         udelay(40);
10594
10595         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10596             tg3_asic_rev(tp) == ASIC_REV_5720) {
10597                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10598                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10599                                 break;
10600                 }
10601                 if (i < TG3_NUM_RDMA_CHANNELS) {
10602                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10603                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10604                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10605                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10606                 }
10607         }
10608
10609         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10610         if (!tg3_flag(tp, 5705_PLUS))
10611                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10612
10613         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10614                 tw32(SNDDATAC_MODE,
10615                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10616         else
10617                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10618
10619         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10620         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10621         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10622         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10623                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10624         tw32(RCVDBDI_MODE, val);
10625         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10626         if (tg3_flag(tp, HW_TSO_1) ||
10627             tg3_flag(tp, HW_TSO_2) ||
10628             tg3_flag(tp, HW_TSO_3))
10629                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10630         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10631         if (tg3_flag(tp, ENABLE_TSS))
10632                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10633         tw32(SNDBDI_MODE, val);
10634         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10635
10636         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10637                 err = tg3_load_5701_a0_firmware_fix(tp);
10638                 if (err)
10639                         return err;
10640         }
10641
10642         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10643                 /* Ignore any errors for the firmware download. If the download
10644                  * fails, the device will operate with EEE disabled.
10645                  */
10646                 tg3_load_57766_firmware(tp);
10647         }
10648
10649         if (tg3_flag(tp, TSO_CAPABLE)) {
10650                 err = tg3_load_tso_firmware(tp);
10651                 if (err)
10652                         return err;
10653         }
10654
10655         tp->tx_mode = TX_MODE_ENABLE;
10656
10657         if (tg3_flag(tp, 5755_PLUS) ||
10658             tg3_asic_rev(tp) == ASIC_REV_5906)
10659                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10660
10661         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10662             tg3_asic_rev(tp) == ASIC_REV_5762) {
10663                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10664                 tp->tx_mode &= ~val;
10665                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10666         }
10667
10668         tw32_f(MAC_TX_MODE, tp->tx_mode);
10669         udelay(100);
10670
10671         if (tg3_flag(tp, ENABLE_RSS)) {
10672                 u32 rss_key[10];
10673
10674                 tg3_rss_write_indir_tbl(tp);
10675
10676                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10677
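                /* Program the 40-byte RSS hash key into ten consecutive registers. */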
10678                 for (i = 0; i < 10; i++)
10679                         tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10680         }
10681
10682         tp->rx_mode = RX_MODE_ENABLE;
10683         if (tg3_flag(tp, 5755_PLUS))
10684                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10685
10686         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10687                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10688
10689         if (tg3_flag(tp, ENABLE_RSS))
10690                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10691                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10692                                RX_MODE_RSS_IPV6_HASH_EN |
10693                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10694                                RX_MODE_RSS_IPV4_HASH_EN |
10695                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10696
10697         tw32_f(MAC_RX_MODE, tp->rx_mode);
10698         udelay(10);
10699
10700         tw32(MAC_LED_CTRL, tp->led_ctrl);
10701
10702         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10703         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10704                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10705                 udelay(10);
10706         }
10707         tw32_f(MAC_RX_MODE, tp->rx_mode);
10708         udelay(10);
10709
10710         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10711                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10712                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10713                         /* Set the drive transmission level to 1.2V,
10714                          * but only if signal pre-emphasis is not set. */
10715                         val = tr32(MAC_SERDES_CFG);
10716                         val &= 0xfffff000;
10717                         val |= 0x880;
10718                         tw32(MAC_SERDES_CFG, val);
10719                 }
10720                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10721                         tw32(MAC_SERDES_CFG, 0x616000);
10722         }
10723
10724         /* Prevent chip from dropping frames when flow control
10725          * is enabled.
10726          */
10727         if (tg3_flag(tp, 57765_CLASS))
10728                 val = 1;
10729         else
10730                 val = 2;
10731         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10732
10733         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10734             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10735                 /* Use hardware link auto-negotiation */
10736                 tg3_flag_set(tp, HW_AUTONEG);
10737         }
10738
10739         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10740             tg3_asic_rev(tp) == ASIC_REV_5714) {
10741                 u32 tmp;
10742
10743                 tmp = tr32(SERDES_RX_CTRL);
10744                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10745                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10746                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10747                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10748         }
10749
10750         if (!tg3_flag(tp, USE_PHYLIB)) {
10751                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10752                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10753
10754                 err = tg3_setup_phy(tp, false);
10755                 if (err)
10756                         return err;
10757
10758                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10759                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10760                         u32 tmp;
10761
10762                         /* Clear CRC stats. */
10763                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10764                                 tg3_writephy(tp, MII_TG3_TEST1,
10765                                              tmp | MII_TG3_TEST1_CRC_EN);
10766                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10767                         }
10768                 }
10769         }
10770
10771         __tg3_set_rx_mode(tp->dev);
10772
10773         /* Initialize receive rules. */
10774         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10775         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10776         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10777         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10778
10779         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10780                 limit = 8;
10781         else
10782                 limit = 16;
10783         if (tg3_flag(tp, ENABLE_ASF))
10784                 limit -= 4;
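        /* Fall through from the limit, zeroing rules (limit - 1) down to 4. */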
10785         switch (limit) {
10786         case 16:
10787                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10788                 fallthrough;
10789         case 15:
10790                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10791                 fallthrough;
10792         case 14:
10793                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10794                 fallthrough;
10795         case 13:
10796                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10797                 fallthrough;
10798         case 12:
10799                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10800                 fallthrough;
10801         case 11:
10802                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10803                 fallthrough;
10804         case 10:
10805                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10806                 fallthrough;
10807         case 9:
10808                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10809                 fallthrough;
10810         case 8:
10811                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10812                 fallthrough;
10813         case 7:
10814                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10815                 fallthrough;
10816         case 6:
10817                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10818                 fallthrough;
10819         case 5:
10820                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10821                 fallthrough;
10822         case 4:
10823                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10824         case 3:
10825                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10826         case 2:
10827         case 1:
10828
10829         default:
10830                 break;
10831         }
10832
10833         if (tg3_flag(tp, ENABLE_APE))
10834                 /* Write our heartbeat update interval to APE. */
10835                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10836                                 APE_HOST_HEARTBEAT_INT_5SEC);
10837
10838         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10839
10840         return 0;
10841 }
10842
10843 /* Called at device open time to get the chip ready for
10844  * packet processing.  Invoked with tp->lock held.
10845  */
10846 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10847 {
10848         /* Chip may have been just powered on. If so, the boot code may still
10849          * be running initialization. Wait for it to finish to avoid races in
10850          * accessing the hardware.
10851          */
10852         tg3_enable_register_access(tp);
10853         tg3_poll_fw(tp);
10854
10855         tg3_switch_clocks(tp);
10856
10857         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10858
10859         return tg3_reset_hw(tp, reset_phy);
10860 }
10861
10862 #ifdef CONFIG_TIGON3_HWMON
10863 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10864 {
10865         u32 off, len = TG3_OCIR_LEN;
10866         int i;
10867
10868         for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10869                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10870
10871                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10872                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10873                         memset(ocir, 0, len);
10874         }
10875 }
10876
10877 /* sysfs attributes for hwmon */
10878 static ssize_t tg3_show_temp(struct device *dev,
10879                              struct device_attribute *devattr, char *buf)
10880 {
10881         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10882         struct tg3 *tp = dev_get_drvdata(dev);
10883         u32 temperature;
10884
10885         spin_lock_bh(&tp->lock);
10886         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10887                                 sizeof(temperature));
10888         spin_unlock_bh(&tp->lock);
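        /* The hwmon sysfs ABI reports temperatures in millidegrees Celsius. */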
10889         return sprintf(buf, "%u\n", temperature * 1000);
10890 }
10891
10892
10893 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10894                           TG3_TEMP_SENSOR_OFFSET);
10895 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10896                           TG3_TEMP_CAUTION_OFFSET);
10897 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10898                           TG3_TEMP_MAX_OFFSET);
10899
10900 static struct attribute *tg3_attrs[] = {
10901         &sensor_dev_attr_temp1_input.dev_attr.attr,
10902         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10903         &sensor_dev_attr_temp1_max.dev_attr.attr,
10904         NULL
10905 };
10906 ATTRIBUTE_GROUPS(tg3);
10907
10908 static void tg3_hwmon_close(struct tg3 *tp)
10909 {
10910         if (tp->hwmon_dev) {
10911                 hwmon_device_unregister(tp->hwmon_dev);
10912                 tp->hwmon_dev = NULL;
10913         }
10914 }
10915
10916 static void tg3_hwmon_open(struct tg3 *tp)
10917 {
10918         int i;
10919         u32 size = 0;
10920         struct pci_dev *pdev = tp->pdev;
10921         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10922
10923         tg3_sd_scan_scratchpad(tp, ocirs);
10924
10925         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10926                 if (!ocirs[i].src_data_length)
10927                         continue;
10928
10929                 size += ocirs[i].src_hdr_length;
10930                 size += ocirs[i].src_data_length;
10931         }
10932
10933         if (!size)
10934                 return;
10935
10936         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10937                                                           tp, tg3_groups);
10938         if (IS_ERR(tp->hwmon_dev)) {
10939                 tp->hwmon_dev = NULL;
10940                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10941         }
10942 }
10943 #else
10944 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10945 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10946 #endif /* CONFIG_TIGON3_HWMON */
10947
10948
10949 #define TG3_STAT_ADD32(PSTAT, REG) \
10950 do {    u32 __val = tr32(REG); \
10951         (PSTAT)->low += __val; \
10952         if ((PSTAT)->low < __val) \
10953                 (PSTAT)->high += 1; \
10954 } while (0)
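/* The low-word test above detects 32-bit carry: in unsigned arithmetic,
 * (low + __val) wraps below __val exactly when bit 31 carried out, e.g.
 * 0xfffffff0 + 0x20 = 0x10 < 0x20, so the high word is bumped.
 */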
10955
10956 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10957 {
10958         struct tg3_hw_stats *sp = tp->hw_stats;
10959
10960         if (!tp->link_up)
10961                 return;
10962
10963         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10964         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10965         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10966         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10967         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10968         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10969         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10970         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10971         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10972         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10973         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10974         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10975         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10976         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10977                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10978                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10979                 u32 val;
10980
10981                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10982                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10983                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10984                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10985         }
10986
10987         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10988         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10989         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10990         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10991         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10992         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10993         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10994         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10995         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10996         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10997         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10998         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10999         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
11000         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
11001
11002         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
11003         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
11004             tg3_asic_rev(tp) != ASIC_REV_5762 &&
11005             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
11006             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
11007                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
11008         } else {
11009                 u32 val = tr32(HOSTCC_FLOW_ATTN);
11010                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
11011                 if (val) {
11012                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
11013                         sp->rx_discards.low += val;
11014                         if (sp->rx_discards.low < val)
11015                                 sp->rx_discards.high += 1;
11016                 }
11017                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
11018         }
11019         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
11020 }
11021
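/* Detect lost MSIs: if a vector still has work pending but its consumer
 * indices have not advanced since the last check, invoke the handler by
 * hand after allowing one timer tick of grace.
 */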
11022 static void tg3_chk_missed_msi(struct tg3 *tp)
11023 {
11024         u32 i;
11025
11026         for (i = 0; i < tp->irq_cnt; i++) {
11027                 struct tg3_napi *tnapi = &tp->napi[i];
11028
11029                 if (tg3_has_work(tnapi)) {
11030                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
11031                             tnapi->last_tx_cons == tnapi->tx_cons) {
11032                                 if (tnapi->chk_msi_cnt < 1) {
11033                                         tnapi->chk_msi_cnt++;
11034                                         return;
11035                                 }
11036                                 tg3_msi(0, tnapi);
11037                         }
11038                 }
11039                 tnapi->chk_msi_cnt = 0;
11040                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
11041                 tnapi->last_tx_cons = tnapi->tx_cons;
11042         }
11043 }
11044
11045 static void tg3_timer(struct timer_list *t)
11046 {
11047         struct tg3 *tp = from_timer(tp, t, timer);
11048
11049         spin_lock(&tp->lock);
11050
11051         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
11052                 spin_unlock(&tp->lock);
11053                 goto restart_timer;
11054         }
11055
11056         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
11057             tg3_flag(tp, 57765_CLASS))
11058                 tg3_chk_missed_msi(tp);
11059
11060         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
11061                 /* BCM4785: Flush posted writes from GbE to host memory. */
11062                 tr32(HOSTCC_MODE);
11063         }
11064
11065         if (!tg3_flag(tp, TAGGED_STATUS)) {
11066                 /* All of this garbage is because, when using non-tagged
11067                  * IRQ status, the mailbox/status_block protocol the chip
11068                  * uses with the CPU is race prone.
11069                  */
11070                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11071                         tw32(GRC_LOCAL_CTRL,
11072                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11073                 } else {
11074                         tw32(HOSTCC_MODE, tp->coalesce_mode |
11075                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11076                 }
11077
11078                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11079                         spin_unlock(&tp->lock);
11080                         tg3_reset_task_schedule(tp);
11081                         goto restart_timer;
11082                 }
11083         }
11084
11085         /* This part only runs once per second. */
11086         if (!--tp->timer_counter) {
11087                 if (tg3_flag(tp, 5705_PLUS))
11088                         tg3_periodic_fetch_stats(tp);
11089
11090                 if (tp->setlpicnt && !--tp->setlpicnt)
11091                         tg3_phy_eee_enable(tp);
11092
11093                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11094                         u32 mac_stat;
11095                         int phy_event;
11096
11097                         mac_stat = tr32(MAC_STATUS);
11098
11099                         phy_event = 0;
11100                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11101                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11102                                         phy_event = 1;
11103                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11104                                 phy_event = 1;
11105
11106                         if (phy_event)
11107                                 tg3_setup_phy(tp, false);
11108                 } else if (tg3_flag(tp, POLL_SERDES)) {
11109                         u32 mac_stat = tr32(MAC_STATUS);
11110                         int need_setup = 0;
11111
11112                         if (tp->link_up &&
11113                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11114                                 need_setup = 1;
11115                         }
11116                         if (!tp->link_up &&
11117                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
11118                                          MAC_STATUS_SIGNAL_DET))) {
11119                                 need_setup = 1;
11120                         }
11121                         if (need_setup) {
11122                                 if (!tp->serdes_counter) {
11123                                         tw32_f(MAC_MODE,
11124                                              (tp->mac_mode &
11125                                               ~MAC_MODE_PORT_MODE_MASK));
11126                                         udelay(40);
11127                                         tw32_f(MAC_MODE, tp->mac_mode);
11128                                         udelay(40);
11129                                 }
11130                                 tg3_setup_phy(tp, false);
11131                         }
11132                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11133                            tg3_flag(tp, 5780_CLASS)) {
11134                         tg3_serdes_parallel_detect(tp);
11135                 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11136                         u32 cpmu = tr32(TG3_CPMU_STATUS);
11137                         bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11138                                          TG3_CPMU_STATUS_LINK_MASK);
11139
11140                         if (link_up != tp->link_up)
11141                                 tg3_setup_phy(tp, false);
11142                 }
11143
11144                 tp->timer_counter = tp->timer_multiplier;
11145         }
11146
11147         /* Heartbeat is only sent once every 2 seconds.
11148          *
11149          * The heartbeat is to tell the ASF firmware that the host
11150          * driver is still alive.  In the event that the OS crashes,
11151          * ASF needs to reset the hardware to free up the FIFO space
11152          * that may be filled with rx packets destined for the host.
11153          * If the FIFO is full, ASF will no longer function properly.
11154          *
11155          * Unintended resets have been reported on real-time kernels
11156          * where the timer doesn't run on time.  Netpoll has the
11157          * same problem.
11158          *
11159          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11160          * to check the ring condition when the heartbeat is expiring
11161          * before doing the reset.  This will prevent most unintended
11162          * resets.
11163          */
11164         if (!--tp->asf_counter) {
11165                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11166                         tg3_wait_for_event_ack(tp);
11167
11168                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11169                                       FWCMD_NICDRV_ALIVE3);
11170                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11171                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11172                                       TG3_FW_UPDATE_TIMEOUT_SEC);
11173
11174                         tg3_generate_fw_event(tp);
11175                 }
11176                 tp->asf_counter = tp->asf_multiplier;
11177         }
11178
11179         /* Update the APE heartbeat every 5 seconds. */
11180         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11181
11182         spin_unlock(&tp->lock);
11183
11184 restart_timer:
11185         tp->timer.expires = jiffies + tp->timer_offset;
11186         add_timer(&tp->timer);
11187 }
11188
11189 static void tg3_timer_init(struct tg3 *tp)
11190 {
11191         if (tg3_flag(tp, TAGGED_STATUS) &&
11192             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11193             !tg3_flag(tp, 57765_CLASS))
11194                 tp->timer_offset = HZ;
11195         else
11196                 tp->timer_offset = HZ / 10;
11197
11198         BUG_ON(tp->timer_offset > HZ);
11199
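        /* timer_multiplier is the number of timer firings per second, so
         * the once-per-second work in tg3_timer() runs when the countdown
         * in tp->timer_counter hits zero.
         */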
11200         tp->timer_multiplier = (HZ / tp->timer_offset);
11201         tp->asf_multiplier = (HZ / tp->timer_offset) *
11202                              TG3_FW_UPDATE_FREQ_SEC;
11203
11204         timer_setup(&tp->timer, tg3_timer, 0);
11205 }
11206
11207 static void tg3_timer_start(struct tg3 *tp)
11208 {
11209         tp->asf_counter   = tp->asf_multiplier;
11210         tp->timer_counter = tp->timer_multiplier;
11211
11212         tp->timer.expires = jiffies + tp->timer_offset;
11213         add_timer(&tp->timer);
11214 }
11215
11216 static void tg3_timer_stop(struct tg3 *tp)
11217 {
11218         del_timer_sync(&tp->timer);
11219 }
11220
11221 /* Restart hardware after configuration changes, self-test, etc.
11222  * Invoked with tp->lock held.
11223  */
11224 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11225         __releases(tp->lock)
11226         __acquires(tp->lock)
11227 {
11228         int err;
11229
11230         err = tg3_init_hw(tp, reset_phy);
11231         if (err) {
11232                 netdev_err(tp->dev,
11233                            "Failed to re-initialize device, aborting\n");
11234                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11235                 tg3_full_unlock(tp);
11236                 tg3_timer_stop(tp);
11237                 tp->irq_sync = 0;
11238                 tg3_napi_enable(tp);
11239                 dev_close(tp->dev);
11240                 tg3_full_lock(tp, 0);
11241         }
11242         return err;
11243 }
11244
11245 static void tg3_reset_task(struct work_struct *work)
11246 {
11247         struct tg3 *tp = container_of(work, struct tg3, reset_task);
11248         int err;
11249
11250         rtnl_lock();
11251         tg3_full_lock(tp, 0);
11252
11253         if (tp->pcierr_recovery || !netif_running(tp->dev)) {
11254                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11255                 tg3_full_unlock(tp);
11256                 rtnl_unlock();
11257                 return;
11258         }
11259
11260         tg3_full_unlock(tp);
11261
11262         tg3_phy_stop(tp);
11263
11264         tg3_netif_stop(tp);
11265
11266         tg3_full_lock(tp, 1);
11267
11268         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11269                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11270                 tp->write32_rx_mbox = tg3_write_flush_reg32;
11271                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11272                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11273         }
11274
11275         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11276         err = tg3_init_hw(tp, true);
11277         if (err) {
11278                 tg3_full_unlock(tp);
11279                 tp->irq_sync = 0;
11280                 tg3_napi_enable(tp);
11281                 /* Clear this flag so that tg3_reset_task_cancel() will not
11282                  * call cancel_work_sync() and wait forever.
11283                  */
11284                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11285                 dev_close(tp->dev);
11286                 goto out;
11287         }
11288
11289         tg3_netif_start(tp);
11290         tg3_full_unlock(tp);
11291         tg3_phy_start(tp);
11292         tg3_flag_clear(tp, RESET_TASK_PENDING);
11293 out:
11294         rtnl_unlock();
11295 }
11296
11297 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11298 {
11299         irq_handler_t fn;
11300         unsigned long flags;
11301         char *name;
11302         struct tg3_napi *tnapi = &tp->napi[irq_num];
11303
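        /* Name each vector after the queues it services so the
         * entries can be told apart in /proc/interrupts.
         */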
11304         if (tp->irq_cnt == 1)
11305                 name = tp->dev->name;
11306         else {
11307                 name = &tnapi->irq_lbl[0];
11308                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11309                         snprintf(name, IFNAMSIZ,
11310                                  "%s-txrx-%d", tp->dev->name, irq_num);
11311                 else if (tnapi->tx_buffers)
11312                         snprintf(name, IFNAMSIZ,
11313                                  "%s-tx-%d", tp->dev->name, irq_num);
11314                 else if (tnapi->rx_rcb)
11315                         snprintf(name, IFNAMSIZ,
11316                                  "%s-rx-%d", tp->dev->name, irq_num);
11317                 else
11318                         snprintf(name, IFNAMSIZ,
11319                                  "%s-%d", tp->dev->name, irq_num);
11320                 name[IFNAMSIZ-1] = 0;
11321         }
11322
11323         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11324                 fn = tg3_msi;
11325                 if (tg3_flag(tp, 1SHOT_MSI))
11326                         fn = tg3_msi_1shot;
11327                 flags = 0;
11328         } else {
11329                 fn = tg3_interrupt;
11330                 if (tg3_flag(tp, TAGGED_STATUS))
11331                         fn = tg3_interrupt_tagged;
11332                 flags = IRQF_SHARED;
11333         }
11334
11335         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11336 }
11337
11338 static int tg3_test_interrupt(struct tg3 *tp)
11339 {
11340         struct tg3_napi *tnapi = &tp->napi[0];
11341         struct net_device *dev = tp->dev;
11342         int err, i, intr_ok = 0;
11343         u32 val;
11344
11345         if (!netif_running(dev))
11346                 return -ENODEV;
11347
11348         tg3_disable_ints(tp);
11349
11350         free_irq(tnapi->irq_vec, tnapi);
11351
11352         /*
11353          * Turn off MSI one shot mode.  Otherwise this test has no
11354          * way to observe whether the interrupt was delivered.
11355          */
11356         if (tg3_flag(tp, 57765_PLUS)) {
11357                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11358                 tw32(MSGINT_MODE, val);
11359         }
11360
11361         err = request_irq(tnapi->irq_vec, tg3_test_isr,
11362                           IRQF_SHARED, dev->name, tnapi);
11363         if (err)
11364                 return err;
11365
11366         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11367         tg3_enable_ints(tp);
11368
11369         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11370                tnapi->coal_now);
11371
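        /* Poll up to ~50 ms for evidence that the interrupt was
         * delivered: the interrupt mailbox goes non-zero or PCI
         * interrupts get masked in MISC_HOST_CTRL.
         */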
11372         for (i = 0; i < 5; i++) {
11373                 u32 int_mbox, misc_host_ctrl;
11374
11375                 int_mbox = tr32_mailbox(tnapi->int_mbox);
11376                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11377
11378                 if ((int_mbox != 0) ||
11379                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11380                         intr_ok = 1;
11381                         break;
11382                 }
11383
11384                 if (tg3_flag(tp, 57765_PLUS) &&
11385                     tnapi->hw_status->status_tag != tnapi->last_tag)
11386                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11387
11388                 msleep(10);
11389         }
11390
11391         tg3_disable_ints(tp);
11392
11393         free_irq(tnapi->irq_vec, tnapi);
11394
11395         err = tg3_request_irq(tp, 0);
11396
11397         if (err)
11398                 return err;
11399
11400         if (intr_ok) {
11401                 /* Reenable MSI one shot mode. */
11402                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11403                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11404                         tw32(MSGINT_MODE, val);
11405                 }
11406                 return 0;
11407         }
11408
11409         return -EIO;
11410 }
11411
11412 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
11413  * successfully restored.
11414  */
11415 static int tg3_test_msi(struct tg3 *tp)
11416 {
11417         int err;
11418         u16 pci_cmd;
11419
11420         if (!tg3_flag(tp, USING_MSI))
11421                 return 0;
11422
11423         /* Turn off SERR reporting in case MSI terminates with Master
11424          * Abort.
11425          */
11426         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11427         pci_write_config_word(tp->pdev, PCI_COMMAND,
11428                               pci_cmd & ~PCI_COMMAND_SERR);
11429
11430         err = tg3_test_interrupt(tp);
11431
11432         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11433
11434         if (!err)
11435                 return 0;
11436
11437         /* other failures */
11438         if (err != -EIO)
11439                 return err;
11440
11441         /* MSI test failed, go back to INTx mode */
11442         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11443                     "to INTx mode. Please report this failure to the PCI "
11444                     "maintainer and include system chipset information\n");
11445
11446         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11447
11448         pci_disable_msi(tp->pdev);
11449
11450         tg3_flag_clear(tp, USING_MSI);
11451         tp->napi[0].irq_vec = tp->pdev->irq;
11452
11453         err = tg3_request_irq(tp, 0);
11454         if (err)
11455                 return err;
11456
11457         /* Need to reset the chip because the MSI cycle may have terminated
11458          * with Master Abort.
11459          */
11460         tg3_full_lock(tp, 1);
11461
11462         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11463         err = tg3_init_hw(tp, true);
11464
11465         tg3_full_unlock(tp);
11466
11467         if (err)
11468                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11469
11470         return err;
11471 }
11472
11473 static int tg3_request_firmware(struct tg3 *tp)
11474 {
11475         const struct tg3_firmware_hdr *fw_hdr;
11476
11477         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11478                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11479                            tp->fw_needed);
11480                 return -ENOENT;
11481         }
11482
11483         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11484
11485         /* Firmware blob starts with version numbers, followed by
11486          * start address and _full_ length including BSS sections
11487          * (which must be longer than the actual data, of course).
11488          */
11489
11490         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11491         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11492                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11493                            tp->fw_len, tp->fw_needed);
11494                 release_firmware(tp->fw);
11495                 tp->fw = NULL;
11496                 return -EINVAL;
11497         }
11498
11499         /* We no longer need firmware; we have it. */
11500         tp->fw_needed = NULL;
11501         return 0;
11502 }
11503
11504 static u32 tg3_irq_count(struct tg3 *tp)
11505 {
11506         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11507
11508         if (irq_cnt > 1) {
11509                 /* We want as many rx rings enabled as there are cpus.
11510                  * In multiqueue MSI-X mode, the first MSI-X vector
11511                  * only deals with link interrupts, etc, so we add
11512                  * one to the number of vectors we are requesting.
11513                  */
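                /* e.g. rxq_cnt = 4, txq_cnt = 1, irq_max = 5:
                 * request min(4 + 1, 5) = 5 vectors.
                 */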
11514                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11515         }
11516
11517         return irq_cnt;
11518 }
11519
11520 static bool tg3_enable_msix(struct tg3 *tp)
11521 {
11522         int i, rc;
11523         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11524
11525         tp->txq_cnt = tp->txq_req;
11526         tp->rxq_cnt = tp->rxq_req;
11527         if (!tp->rxq_cnt)
11528                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11529         if (tp->rxq_cnt > tp->rxq_max)
11530                 tp->rxq_cnt = tp->rxq_max;
11531
11532         /* Disable multiple TX rings by default.  Simple round-robin hardware
11533          * scheduling of the TX rings can cause starvation of rings with
11534          * small packets when other rings have TSO or jumbo packets.
11535          */
11536         if (!tp->txq_req)
11537                 tp->txq_cnt = 1;
11538
11539         tp->irq_cnt = tg3_irq_count(tp);
11540
11541         for (i = 0; i < tp->irq_max; i++) {
11542                 msix_ent[i].entry  = i;
11543                 msix_ent[i].vector = 0;
11544         }
11545
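        /* pci_enable_msix_range() grants anywhere between 1 and
         * tp->irq_cnt vectors, or returns a negative errno if not
         * even one vector could be allocated.
         */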
11546         rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11547         if (rc < 0) {
11548                 return false;
11549         } else if (rc < tp->irq_cnt) {
11550                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11551                               tp->irq_cnt, rc);
11552                 tp->irq_cnt = rc;
11553                 tp->rxq_cnt = max(rc - 1, 1);
11554                 if (tp->txq_cnt)
11555                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11556         }
11557
11558         for (i = 0; i < tp->irq_max; i++)
11559                 tp->napi[i].irq_vec = msix_ent[i].vector;
11560
11561         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11562                 pci_disable_msix(tp->pdev);
11563                 return false;
11564         }
11565
11566         if (tp->irq_cnt == 1)
11567                 return true;
11568
11569         tg3_flag_set(tp, ENABLE_RSS);
11570
11571         if (tp->txq_cnt > 1)
11572                 tg3_flag_set(tp, ENABLE_TSS);
11573
11574         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11575
11576         return true;
11577 }
11578
11579 static void tg3_ints_init(struct tg3 *tp)
11580 {
11581         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11582             !tg3_flag(tp, TAGGED_STATUS)) {
11583                 /* All MSI supporting chips should support tagged
11584                  * status.  Warn and fall back to INTx if not.
11585                  */
11586                 netdev_warn(tp->dev,
11587                             "MSI without TAGGED_STATUS? Not using MSI\n");
11588                 goto defcfg;
11589         }
11590
11591         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11592                 tg3_flag_set(tp, USING_MSIX);
11593         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11594                 tg3_flag_set(tp, USING_MSI);
11595
11596         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11597                 u32 msi_mode = tr32(MSGINT_MODE);
11598                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11599                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11600                 if (!tg3_flag(tp, 1SHOT_MSI))
11601                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11602                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11603         }
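        /* When MSI-X is not in use, everything runs on the single
         * vector at pdev->irq (MSI or legacy INTx).
         */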
11604 defcfg:
11605         if (!tg3_flag(tp, USING_MSIX)) {
11606                 tp->irq_cnt = 1;
11607                 tp->napi[0].irq_vec = tp->pdev->irq;
11608         }
11609
11610         if (tp->irq_cnt == 1) {
11611                 tp->txq_cnt = 1;
11612                 tp->rxq_cnt = 1;
11613                 netif_set_real_num_tx_queues(tp->dev, 1);
11614                 netif_set_real_num_rx_queues(tp->dev, 1);
11615         }
11616 }
11617
11618 static void tg3_ints_fini(struct tg3 *tp)
11619 {
11620         if (tg3_flag(tp, USING_MSIX))
11621                 pci_disable_msix(tp->pdev);
11622         else if (tg3_flag(tp, USING_MSI))
11623                 pci_disable_msi(tp->pdev);
11624         tg3_flag_clear(tp, USING_MSI);
11625         tg3_flag_clear(tp, USING_MSIX);
11626         tg3_flag_clear(tp, ENABLE_RSS);
11627         tg3_flag_clear(tp, ENABLE_TSS);
11628 }
11629
11630 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11631                      bool init)
11632 {
11633         struct net_device *dev = tp->dev;
11634         int i, err;
11635
11636         /*
11637          * Set up interrupts first so we know how
11638          * many NAPI resources to allocate
11639          */
11640         tg3_ints_init(tp);
11641
11642         tg3_rss_check_indir_tbl(tp);
11643
11644         /* The placement of this call is tied
11645          * to the setup and use of Host TX descriptors.
11646          */
11647         err = tg3_alloc_consistent(tp);
11648         if (err)
11649                 goto out_ints_fini;
11650
11651         tg3_napi_init(tp);
11652
11653         tg3_napi_enable(tp);
11654
11655         for (i = 0; i < tp->irq_cnt; i++) {
11656                 err = tg3_request_irq(tp, i);
11657                 if (err) {
11658                         for (i--; i >= 0; i--) {
11659                                 struct tg3_napi *tnapi = &tp->napi[i];
11660
11661                                 free_irq(tnapi->irq_vec, tnapi);
11662                         }
11663                         goto out_napi_fini;
11664                 }
11665         }
11666
11667         tg3_full_lock(tp, 0);
11668
11669         if (init)
11670                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11671
11672         err = tg3_init_hw(tp, reset_phy);
11673         if (err) {
11674                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11675                 tg3_free_rings(tp);
11676         }
11677
11678         tg3_full_unlock(tp);
11679
11680         if (err)
11681                 goto out_free_irq;
11682
11683         if (test_irq && tg3_flag(tp, USING_MSI)) {
11684                 err = tg3_test_msi(tp);
11685
11686                 if (err) {
11687                         tg3_full_lock(tp, 0);
11688                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11689                         tg3_free_rings(tp);
11690                         tg3_full_unlock(tp);
11691
11692                         goto out_napi_fini;
11693                 }
11694
11695                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11696                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11697
11698                         tw32(PCIE_TRANSACTION_CFG,
11699                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11700                 }
11701         }
11702
11703         tg3_phy_start(tp);
11704
11705         tg3_hwmon_open(tp);
11706
11707         tg3_full_lock(tp, 0);
11708
11709         tg3_timer_start(tp);
11710         tg3_flag_set(tp, INIT_COMPLETE);
11711         tg3_enable_ints(tp);
11712
11713         tg3_ptp_resume(tp);
11714
11715         tg3_full_unlock(tp);
11716
11717         netif_tx_start_all_queues(dev);
11718
11719         /*
11720          * Reset the loopback feature if it was turned on while the device
11721          * was down; make sure that it is installed properly now.
11722          */
11723         if (dev->features & NETIF_F_LOOPBACK)
11724                 tg3_set_loopback(dev, dev->features);
11725
11726         return 0;
11727
11728 out_free_irq:
11729         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11730                 struct tg3_napi *tnapi = &tp->napi[i];
11731                 free_irq(tnapi->irq_vec, tnapi);
11732         }
11733
11734 out_napi_fini:
11735         tg3_napi_disable(tp);
11736         tg3_napi_fini(tp);
11737         tg3_free_consistent(tp);
11738
11739 out_ints_fini:
11740         tg3_ints_fini(tp);
11741
11742         return err;
11743 }
11744
11745 static void tg3_stop(struct tg3 *tp)
11746 {
11747         int i;
11748
11749         tg3_reset_task_cancel(tp);
11750         tg3_netif_stop(tp);
11751
11752         tg3_timer_stop(tp);
11753
11754         tg3_hwmon_close(tp);
11755
11756         tg3_phy_stop(tp);
11757
11758         tg3_full_lock(tp, 1);
11759
11760         tg3_disable_ints(tp);
11761
11762         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11763         tg3_free_rings(tp);
11764         tg3_flag_clear(tp, INIT_COMPLETE);
11765
11766         tg3_full_unlock(tp);
11767
11768         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11769                 struct tg3_napi *tnapi = &tp->napi[i];
11770                 free_irq(tnapi->irq_vec, tnapi);
11771         }
11772
11773         tg3_ints_fini(tp);
11774
11775         tg3_napi_fini(tp);
11776
11777         tg3_free_consistent(tp);
11778 }
11779
11780 static int tg3_open(struct net_device *dev)
11781 {
11782         struct tg3 *tp = netdev_priv(dev);
11783         int err;
11784
11785         if (tp->pcierr_recovery) {
11786                 netdev_err(dev, "Failed to open device. PCI error recovery "
11787                            "in progress\n");
11788                 return -EAGAIN;
11789         }
11790
11791         if (tp->fw_needed) {
11792                 err = tg3_request_firmware(tp);
11793                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11794                         if (err) {
11795                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11796                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11797                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11798                                 netdev_warn(tp->dev, "EEE capability restored\n");
11799                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11800                         }
11801                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11802                         if (err)
11803                                 return err;
11804                 } else if (err) {
11805                         netdev_warn(tp->dev, "TSO capability disabled\n");
11806                         tg3_flag_clear(tp, TSO_CAPABLE);
11807                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11808                         netdev_notice(tp->dev, "TSO capability restored\n");
11809                         tg3_flag_set(tp, TSO_CAPABLE);
11810                 }
11811         }
11812
11813         tg3_carrier_off(tp);
11814
11815         err = tg3_power_up(tp);
11816         if (err)
11817                 return err;
11818
11819         tg3_full_lock(tp, 0);
11820
11821         tg3_disable_ints(tp);
11822         tg3_flag_clear(tp, INIT_COMPLETE);
11823
11824         tg3_full_unlock(tp);
11825
11826         err = tg3_start(tp,
11827                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11828                         true, true);
11829         if (err) {
11830                 tg3_frob_aux_power(tp, false);
11831                 pci_set_power_state(tp->pdev, PCI_D3hot);
11832         }
11833
11834         return err;
11835 }
11836
11837 static int tg3_close(struct net_device *dev)
11838 {
11839         struct tg3 *tp = netdev_priv(dev);
11840
11841         if (tp->pcierr_recovery) {
11842                 netdev_err(dev, "Failed to close device. PCI error recovery "
11843                            "in progress\n");
11844                 return -EAGAIN;
11845         }
11846
11847         tg3_stop(tp);
11848
11849         if (pci_device_is_present(tp->pdev)) {
11850                 tg3_power_down_prepare(tp);
11851
11852                 tg3_carrier_off(tp);
11853         }
11854         return 0;
11855 }
11856
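/* Hardware statistics are maintained as two 32-bit halves; splice
 * them back into a single 64-bit value.
 */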
11857 static inline u64 get_stat64(tg3_stat64_t *val)
11858 {
11859         return ((u64)val->high << 32) | ((u64)val->low);
11860 }
11861
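/* On 5700/5701 copper devices the CRC error count is read from the
 * PHY's test counter and accumulated in software; everything else
 * uses the MAC's FCS error statistic directly.
 */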
11862 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11863 {
11864         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11865
11866         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11867             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11868              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11869                 u32 val;
11870
11871                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11872                         tg3_writephy(tp, MII_TG3_TEST1,
11873                                      val | MII_TG3_TEST1_CRC_EN);
11874                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11875                 } else
11876                         val = 0;
11877
11878                 tp->phy_crc_errors += val;
11879
11880                 return tp->phy_crc_errors;
11881         }
11882
11883         return get_stat64(&hw_stats->rx_fcs_errors);
11884 }
11885
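/* Ethtool stats are cumulative: each counter is the snapshot saved
 * before the last chip reset (estats_prev) plus the live hardware
 * count.
 */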
11886 #define ESTAT_ADD(member) \
11887         estats->member =        old_estats->member + \
11888                                 get_stat64(&hw_stats->member)
11889
11890 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11891 {
11892         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11893         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11894
11895         ESTAT_ADD(rx_octets);
11896         ESTAT_ADD(rx_fragments);
11897         ESTAT_ADD(rx_ucast_packets);
11898         ESTAT_ADD(rx_mcast_packets);
11899         ESTAT_ADD(rx_bcast_packets);
11900         ESTAT_ADD(rx_fcs_errors);
11901         ESTAT_ADD(rx_align_errors);
11902         ESTAT_ADD(rx_xon_pause_rcvd);
11903         ESTAT_ADD(rx_xoff_pause_rcvd);
11904         ESTAT_ADD(rx_mac_ctrl_rcvd);
11905         ESTAT_ADD(rx_xoff_entered);
11906         ESTAT_ADD(rx_frame_too_long_errors);
11907         ESTAT_ADD(rx_jabbers);
11908         ESTAT_ADD(rx_undersize_packets);
11909         ESTAT_ADD(rx_in_length_errors);
11910         ESTAT_ADD(rx_out_length_errors);
11911         ESTAT_ADD(rx_64_or_less_octet_packets);
11912         ESTAT_ADD(rx_65_to_127_octet_packets);
11913         ESTAT_ADD(rx_128_to_255_octet_packets);
11914         ESTAT_ADD(rx_256_to_511_octet_packets);
11915         ESTAT_ADD(rx_512_to_1023_octet_packets);
11916         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11917         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11918         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11919         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11920         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11921
11922         ESTAT_ADD(tx_octets);
11923         ESTAT_ADD(tx_collisions);
11924         ESTAT_ADD(tx_xon_sent);
11925         ESTAT_ADD(tx_xoff_sent);
11926         ESTAT_ADD(tx_flow_control);
11927         ESTAT_ADD(tx_mac_errors);
11928         ESTAT_ADD(tx_single_collisions);
11929         ESTAT_ADD(tx_mult_collisions);
11930         ESTAT_ADD(tx_deferred);
11931         ESTAT_ADD(tx_excessive_collisions);
11932         ESTAT_ADD(tx_late_collisions);
11933         ESTAT_ADD(tx_collide_2times);
11934         ESTAT_ADD(tx_collide_3times);
11935         ESTAT_ADD(tx_collide_4times);
11936         ESTAT_ADD(tx_collide_5times);
11937         ESTAT_ADD(tx_collide_6times);
11938         ESTAT_ADD(tx_collide_7times);
11939         ESTAT_ADD(tx_collide_8times);
11940         ESTAT_ADD(tx_collide_9times);
11941         ESTAT_ADD(tx_collide_10times);
11942         ESTAT_ADD(tx_collide_11times);
11943         ESTAT_ADD(tx_collide_12times);
11944         ESTAT_ADD(tx_collide_13times);
11945         ESTAT_ADD(tx_collide_14times);
11946         ESTAT_ADD(tx_collide_15times);
11947         ESTAT_ADD(tx_ucast_packets);
11948         ESTAT_ADD(tx_mcast_packets);
11949         ESTAT_ADD(tx_bcast_packets);
11950         ESTAT_ADD(tx_carrier_sense_errors);
11951         ESTAT_ADD(tx_discards);
11952         ESTAT_ADD(tx_errors);
11953
11954         ESTAT_ADD(dma_writeq_full);
11955         ESTAT_ADD(dma_write_prioq_full);
11956         ESTAT_ADD(rxbds_empty);
11957         ESTAT_ADD(rx_discards);
11958         ESTAT_ADD(rx_errors);
11959         ESTAT_ADD(rx_threshold_hit);
11960
11961         ESTAT_ADD(dma_readq_full);
11962         ESTAT_ADD(dma_read_prioq_full);
11963         ESTAT_ADD(tx_comp_queue_full);
11964
11965         ESTAT_ADD(ring_set_send_prod_index);
11966         ESTAT_ADD(ring_status_update);
11967         ESTAT_ADD(nic_irqs);
11968         ESTAT_ADD(nic_avoided_irqs);
11969         ESTAT_ADD(nic_tx_threshold_hit);
11970
11971         ESTAT_ADD(mbuf_lwm_thresh_hit);
11972 }
11973
11974 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11975 {
11976         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11977         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11978
11979         stats->rx_packets = old_stats->rx_packets +
11980                 get_stat64(&hw_stats->rx_ucast_packets) +
11981                 get_stat64(&hw_stats->rx_mcast_packets) +
11982                 get_stat64(&hw_stats->rx_bcast_packets);
11983
11984         stats->tx_packets = old_stats->tx_packets +
11985                 get_stat64(&hw_stats->tx_ucast_packets) +
11986                 get_stat64(&hw_stats->tx_mcast_packets) +
11987                 get_stat64(&hw_stats->tx_bcast_packets);
11988
11989         stats->rx_bytes = old_stats->rx_bytes +
11990                 get_stat64(&hw_stats->rx_octets);
11991         stats->tx_bytes = old_stats->tx_bytes +
11992                 get_stat64(&hw_stats->tx_octets);
11993
11994         stats->rx_errors = old_stats->rx_errors +
11995                 get_stat64(&hw_stats->rx_errors);
11996         stats->tx_errors = old_stats->tx_errors +
11997                 get_stat64(&hw_stats->tx_errors) +
11998                 get_stat64(&hw_stats->tx_mac_errors) +
11999                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
12000                 get_stat64(&hw_stats->tx_discards);
12001
12002         stats->multicast = old_stats->multicast +
12003                 get_stat64(&hw_stats->rx_mcast_packets);
12004         stats->collisions = old_stats->collisions +
12005                 get_stat64(&hw_stats->tx_collisions);
12006
12007         stats->rx_length_errors = old_stats->rx_length_errors +
12008                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
12009                 get_stat64(&hw_stats->rx_undersize_packets);
12010
12011         stats->rx_frame_errors = old_stats->rx_frame_errors +
12012                 get_stat64(&hw_stats->rx_align_errors);
12013         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
12014                 get_stat64(&hw_stats->tx_discards);
12015         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
12016                 get_stat64(&hw_stats->tx_carrier_sense_errors);
12017
12018         stats->rx_crc_errors = old_stats->rx_crc_errors +
12019                 tg3_calc_crc_errors(tp);
12020
12021         stats->rx_missed_errors = old_stats->rx_missed_errors +
12022                 get_stat64(&hw_stats->rx_discards);
12023
12024         stats->rx_dropped = tp->rx_dropped;
12025         stats->tx_dropped = tp->tx_dropped;
12026 }
12027
12028 static int tg3_get_regs_len(struct net_device *dev)
12029 {
12030         return TG3_REG_BLK_SIZE;
12031 }
12032
12033 static void tg3_get_regs(struct net_device *dev,
12034                 struct ethtool_regs *regs, void *_p)
12035 {
12036         struct tg3 *tp = netdev_priv(dev);
12037
12038         regs->version = 0;
12039
12040         memset(_p, 0, TG3_REG_BLK_SIZE);
12041
12042         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12043                 return;
12044
12045         tg3_full_lock(tp, 0);
12046
12047         tg3_dump_legacy_regs(tp, (u32 *)_p);
12048
12049         tg3_full_unlock(tp);
12050 }
12051
12052 static int tg3_get_eeprom_len(struct net_device *dev)
12053 {
12054         struct tg3 *tp = netdev_priv(dev);
12055
12056         return tp->nvram_size;
12057 }
12058
12059 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12060 {
12061         struct tg3 *tp = netdev_priv(dev);
12062         int ret, cpmu_restore = 0;
12063         u8  *pd;
12064         u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
12065         __be32 val;
12066
12067         if (tg3_flag(tp, NO_NVRAM))
12068                 return -EINVAL;
12069
12070         offset = eeprom->offset;
12071         len = eeprom->len;
12072         eeprom->len = 0;
12073
12074         eeprom->magic = TG3_EEPROM_MAGIC;
12075
12076         /* Override clock, link aware and link idle modes */
12077         if (tg3_flag(tp, CPMU_PRESENT)) {
12078                 cpmu_val = tr32(TG3_CPMU_CTRL);
12079                 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12080                                 CPMU_CTRL_LINK_IDLE_MODE)) {
12081                         tw32(TG3_CPMU_CTRL, cpmu_val &
12082                                             ~(CPMU_CTRL_LINK_AWARE_MODE |
12083                                              CPMU_CTRL_LINK_IDLE_MODE));
12084                         cpmu_restore = 1;
12085                 }
12086         }
12087         tg3_override_clk(tp);
12088
12089         if (offset & 3) {
12090                 /* adjustments to start on required 4 byte boundary */
12091                 b_offset = offset & 3;
12092                 b_count = 4 - b_offset;
12093                 if (b_count > len) {
12094                         /* i.e. offset=1 len=2 */
12095                         b_count = len;
12096                 }
12097                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12098                 if (ret)
12099                         goto eeprom_done;
12100                 memcpy(data, ((char *)&val) + b_offset, b_count);
12101                 len -= b_count;
12102                 offset += b_count;
12103                 eeprom->len += b_count;
12104         }
12105
12106         /* read bytes up to the last 4 byte boundary */
12107         pd = &data[eeprom->len];
12108         for (i = 0; i < (len - (len & 3)); i += 4) {
12109                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12110                 if (ret) {
12111                         if (i)
12112                                 i -= 4;
12113                         eeprom->len += i;
12114                         goto eeprom_done;
12115                 }
12116                 memcpy(pd + i, &val, 4);
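                /* NVRAM reads are slow; periodically yield the CPU
                 * and honor pending signals so a large dump cannot
                 * hog the processor.
                 */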
12117                 if (need_resched()) {
12118                         if (signal_pending(current)) {
12119                                 eeprom->len += i;
12120                                 ret = -EINTR;
12121                                 goto eeprom_done;
12122                         }
12123                         cond_resched();
12124                 }
12125         }
12126         eeprom->len += i;
12127
12128         if (len & 3) {
12129                 /* read last bytes not ending on 4 byte boundary */
12130                 pd = &data[eeprom->len];
12131                 b_count = len & 3;
12132                 b_offset = offset + len - b_count;
12133                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12134                 if (ret)
12135                         goto eeprom_done;
12136                 memcpy(pd, &val, b_count);
12137                 eeprom->len += b_count;
12138         }
12139         ret = 0;
12140
12141 eeprom_done:
12142         /* Restore clock, link aware and link idle modes */
12143         tg3_restore_clk(tp);
12144         if (cpmu_restore)
12145                 tw32(TG3_CPMU_CTRL, cpmu_val);
12146
12147         return ret;
12148 }
12149
12150 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12151 {
12152         struct tg3 *tp = netdev_priv(dev);
12153         int ret;
12154         u32 offset, len, b_offset, odd_len;
12155         u8 *buf;
12156         __be32 start = 0, end;
12157
12158         if (tg3_flag(tp, NO_NVRAM) ||
12159             eeprom->magic != TG3_EEPROM_MAGIC)
12160                 return -EINVAL;
12161
12162         offset = eeprom->offset;
12163         len = eeprom->len;
12164
12165         if ((b_offset = (offset & 3))) {
12166                 /* adjustments to start on required 4 byte boundary */
12167                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12168                 if (ret)
12169                         return ret;
12170                 len += b_offset;
12171                 offset &= ~3;
12172                 if (len < 4)
12173                         len = 4;
12174         }
12175
12176         odd_len = 0;
12177         if (len & 3) {
12178                 /* adjustments to end on required 4 byte boundary */
12179                 odd_len = 1;
12180                 len = (len + 3) & ~3;
12181                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12182                 if (ret)
12183                         return ret;
12184         }
12185
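        /* For unaligned writes, build a bounce buffer: the preserved
         * leading word, the caller's data, and the preserved trailing
         * word, so the NVRAM block write stays 4-byte aligned.
         */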
12186         buf = data;
12187         if (b_offset || odd_len) {
12188                 buf = kmalloc(len, GFP_KERNEL);
12189                 if (!buf)
12190                         return -ENOMEM;
12191                 if (b_offset)
12192                         memcpy(buf, &start, 4);
12193                 if (odd_len)
12194                         memcpy(buf+len-4, &end, 4);
12195                 memcpy(buf + b_offset, data, eeprom->len);
12196         }
12197
12198         ret = tg3_nvram_write_block(tp, offset, len, buf);
12199
12200         if (buf != data)
12201                 kfree(buf);
12202
12203         return ret;
12204 }
12205
12206 static int tg3_get_link_ksettings(struct net_device *dev,
12207                                   struct ethtool_link_ksettings *cmd)
12208 {
12209         struct tg3 *tp = netdev_priv(dev);
12210         u32 supported, advertising;
12211
12212         if (tg3_flag(tp, USE_PHYLIB)) {
12213                 struct phy_device *phydev;
12214                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12215                         return -EAGAIN;
12216                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12217                 phy_ethtool_ksettings_get(phydev, cmd);
12218
12219                 return 0;
12220         }
12221
12222         supported = (SUPPORTED_Autoneg);
12223
12224         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12225                 supported |= (SUPPORTED_1000baseT_Half |
12226                               SUPPORTED_1000baseT_Full);
12227
12228         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12229                 supported |= (SUPPORTED_100baseT_Half |
12230                               SUPPORTED_100baseT_Full |
12231                               SUPPORTED_10baseT_Half |
12232                               SUPPORTED_10baseT_Full |
12233                               SUPPORTED_TP);
12234                 cmd->base.port = PORT_TP;
12235         } else {
12236                 supported |= SUPPORTED_FIBRE;
12237                 cmd->base.port = PORT_FIBRE;
12238         }
12239         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12240                                                 supported);
12241
12242         advertising = tp->link_config.advertising;
12243         if (tg3_flag(tp, PAUSE_AUTONEG)) {
12244                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12245                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12246                                 advertising |= ADVERTISED_Pause;
12247                         } else {
12248                                 advertising |= ADVERTISED_Pause |
12249                                         ADVERTISED_Asym_Pause;
12250                         }
12251                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12252                         advertising |= ADVERTISED_Asym_Pause;
12253                 }
12254         }
12255         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12256                                                 advertising);
12257
12258         if (netif_running(dev) && tp->link_up) {
12259                 cmd->base.speed = tp->link_config.active_speed;
12260                 cmd->base.duplex = tp->link_config.active_duplex;
12261                 ethtool_convert_legacy_u32_to_link_mode(
12262                         cmd->link_modes.lp_advertising,
12263                         tp->link_config.rmt_adv);
12264
12265                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12266                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12267                                 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12268                         else
12269                                 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12270                 }
12271         } else {
12272                 cmd->base.speed = SPEED_UNKNOWN;
12273                 cmd->base.duplex = DUPLEX_UNKNOWN;
12274                 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12275         }
12276         cmd->base.phy_address = tp->phy_addr;
12277         cmd->base.autoneg = tp->link_config.autoneg;
12278         return 0;
12279 }
12280
12281 static int tg3_set_link_ksettings(struct net_device *dev,
12282                                   const struct ethtool_link_ksettings *cmd)
12283 {
12284         struct tg3 *tp = netdev_priv(dev);
12285         u32 speed = cmd->base.speed;
12286         u32 advertising;
12287
12288         if (tg3_flag(tp, USE_PHYLIB)) {
12289                 struct phy_device *phydev;
12290                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12291                         return -EAGAIN;
12292                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12293                 return phy_ethtool_ksettings_set(phydev, cmd);
12294         }
12295
12296         if (cmd->base.autoneg != AUTONEG_ENABLE &&
12297             cmd->base.autoneg != AUTONEG_DISABLE)
12298                 return -EINVAL;
12299
12300         if (cmd->base.autoneg == AUTONEG_DISABLE &&
12301             cmd->base.duplex != DUPLEX_FULL &&
12302             cmd->base.duplex != DUPLEX_HALF)
12303                 return -EINVAL;
12304
12305         ethtool_convert_link_mode_to_legacy_u32(&advertising,
12306                                                 cmd->link_modes.advertising);
12307
12308         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12309                 u32 mask = ADVERTISED_Autoneg |
12310                            ADVERTISED_Pause |
12311                            ADVERTISED_Asym_Pause;
12312
12313                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12314                         mask |= ADVERTISED_1000baseT_Half |
12315                                 ADVERTISED_1000baseT_Full;
12316
12317                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12318                         mask |= ADVERTISED_100baseT_Half |
12319                                 ADVERTISED_100baseT_Full |
12320                                 ADVERTISED_10baseT_Half |
12321                                 ADVERTISED_10baseT_Full |
12322                                 ADVERTISED_TP;
12323                 else
12324                         mask |= ADVERTISED_FIBRE;
12325
12326                 if (advertising & ~mask)
12327                         return -EINVAL;
12328
12329                 mask &= (ADVERTISED_1000baseT_Half |
12330                          ADVERTISED_1000baseT_Full |
12331                          ADVERTISED_100baseT_Half |
12332                          ADVERTISED_100baseT_Full |
12333                          ADVERTISED_10baseT_Half |
12334                          ADVERTISED_10baseT_Full);
12335
12336                 advertising &= mask;
12337         } else {
12338                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12339                         if (speed != SPEED_1000)
12340                                 return -EINVAL;
12341
12342                         if (cmd->base.duplex != DUPLEX_FULL)
12343                                 return -EINVAL;
12344                 } else {
12345                         if (speed != SPEED_100 &&
12346                             speed != SPEED_10)
12347                                 return -EINVAL;
12348                 }
12349         }
12350
12351         tg3_full_lock(tp, 0);
12352
12353         tp->link_config.autoneg = cmd->base.autoneg;
12354         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12355                 tp->link_config.advertising = (advertising |
12356                                               ADVERTISED_Autoneg);
12357                 tp->link_config.speed = SPEED_UNKNOWN;
12358                 tp->link_config.duplex = DUPLEX_UNKNOWN;
12359         } else {
12360                 tp->link_config.advertising = 0;
12361                 tp->link_config.speed = speed;
12362                 tp->link_config.duplex = cmd->base.duplex;
12363         }
12364
12365         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12366
12367         tg3_warn_mgmt_link_flap(tp);
12368
12369         if (netif_running(dev))
12370                 tg3_setup_phy(tp, true);
12371
12372         tg3_full_unlock(tp);
12373
12374         return 0;
12375 }
12376
12377 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12378 {
12379         struct tg3 *tp = netdev_priv(dev);
12380
12381         strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12382         strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12383         strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12384 }
12385
12386 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12387 {
12388         struct tg3 *tp = netdev_priv(dev);
12389
12390         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12391                 wol->supported = WAKE_MAGIC;
12392         else
12393                 wol->supported = 0;
12394         wol->wolopts = 0;
12395         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12396                 wol->wolopts = WAKE_MAGIC;
12397         memset(&wol->sopass, 0, sizeof(wol->sopass));
12398 }
12399
12400 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12401 {
12402         struct tg3 *tp = netdev_priv(dev);
12403         struct device *dp = &tp->pdev->dev;
12404
12405         if (wol->wolopts & ~WAKE_MAGIC)
12406                 return -EINVAL;
12407         if ((wol->wolopts & WAKE_MAGIC) &&
12408             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12409                 return -EINVAL;
12410
12411         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12412
12413         if (device_may_wakeup(dp))
12414                 tg3_flag_set(tp, WOL_ENABLE);
12415         else
12416                 tg3_flag_clear(tp, WOL_ENABLE);
12417
12418         return 0;
12419 }
12420
12421 static u32 tg3_get_msglevel(struct net_device *dev)
12422 {
12423         struct tg3 *tp = netdev_priv(dev);
12424         return tp->msg_enable;
12425 }
12426
12427 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12428 {
12429         struct tg3 *tp = netdev_priv(dev);
12430         tp->msg_enable = value;
12431 }
12432
12433 static int tg3_nway_reset(struct net_device *dev)
12434 {
12435         struct tg3 *tp = netdev_priv(dev);
12436         int r;
12437
12438         if (!netif_running(dev))
12439                 return -EAGAIN;
12440
12441         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12442                 return -EINVAL;
12443
12444         tg3_warn_mgmt_link_flap(tp);
12445
12446         if (tg3_flag(tp, USE_PHYLIB)) {
12447                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12448                         return -EAGAIN;
12449                 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12450         } else {
12451                 u32 bmcr;
12452
12453                 spin_lock_bh(&tp->lock);
12454                 r = -EINVAL;
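                /* BMCR is read twice; the first read appears to be a
                 * dummy, presumably flushing stale latched state
                 * before the value is tested.
                 */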
12455                 tg3_readphy(tp, MII_BMCR, &bmcr);
12456                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12457                     ((bmcr & BMCR_ANENABLE) ||
12458                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12459                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12460                                                    BMCR_ANENABLE);
12461                         r = 0;
12462                 }
12463                 spin_unlock_bh(&tp->lock);
12464         }
12465
12466         return r;
12467 }
12468
12469 static void tg3_get_ringparam(struct net_device *dev,
12470                               struct ethtool_ringparam *ering,
12471                               struct kernel_ethtool_ringparam *kernel_ering,
12472                               struct netlink_ext_ack *extack)
12473 {
12474         struct tg3 *tp = netdev_priv(dev);
12475
12476         ering->rx_max_pending = tp->rx_std_ring_mask;
12477         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12478                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12479         else
12480                 ering->rx_jumbo_max_pending = 0;
12481
12482         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12483
12484         ering->rx_pending = tp->rx_pending;
12485         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12486                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12487         else
12488                 ering->rx_jumbo_pending = 0;
12489
12490         ering->tx_pending = tp->napi[0].tx_pending;
12491 }
12492
12493 static int tg3_set_ringparam(struct net_device *dev,
12494                              struct ethtool_ringparam *ering,
12495                              struct kernel_ethtool_ringparam *kernel_ering,
12496                              struct netlink_ext_ack *extack)
12497 {
12498         struct tg3 *tp = netdev_priv(dev);
12499         int i, irq_sync = 0, err = 0;
12500         bool reset_phy = false;
12501
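        /* The tx ring must hold more than one maximally-fragmented
         * skb (MAX_SKB_FRAGS descriptors); TSO_BUG parts presumably
         * need the 3x margin for the segmentation workaround.
         */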
12502         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12503             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12504             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12505             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12506             (tg3_flag(tp, TSO_BUG) &&
12507              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12508                 return -EINVAL;
12509
12510         if (netif_running(dev)) {
12511                 tg3_phy_stop(tp);
12512                 tg3_netif_stop(tp);
12513                 irq_sync = 1;
12514         }
12515
12516         tg3_full_lock(tp, irq_sync);
12517
12518         tp->rx_pending = ering->rx_pending;
12519
12520         if (tg3_flag(tp, MAX_RXPEND_64) &&
12521             tp->rx_pending > 63)
12522                 tp->rx_pending = 63;
12523
12524         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12525                 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12526
12527         for (i = 0; i < tp->irq_max; i++)
12528                 tp->napi[i].tx_pending = ering->tx_pending;
12529
12530         if (netif_running(dev)) {
12531                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12532                 /* Reset PHY to avoid PHY lock up */
12533                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12534                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
12535                     tg3_asic_rev(tp) == ASIC_REV_5720)
12536                         reset_phy = true;
12537
12538                 err = tg3_restart_hw(tp, reset_phy);
12539                 if (!err)
12540                         tg3_netif_start(tp);
12541         }
12542
12543         tg3_full_unlock(tp);
12544
12545         if (irq_sync && !err)
12546                 tg3_phy_start(tp);
12547
12548         return err;
12549 }
12550
12551 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12552 {
12553         struct tg3 *tp = netdev_priv(dev);
12554
12555         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12556
12557         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12558                 epause->rx_pause = 1;
12559         else
12560                 epause->rx_pause = 0;
12561
12562         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12563                 epause->tx_pause = 1;
12564         else
12565                 epause->tx_pause = 0;
12566 }
12567
12568 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12569 {
12570         struct tg3 *tp = netdev_priv(dev);
12571         int err = 0;
12572         bool reset_phy = false;
12573
12574         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12575                 tg3_warn_mgmt_link_flap(tp);
12576
12577         if (tg3_flag(tp, USE_PHYLIB)) {
12578                 struct phy_device *phydev;
12579
12580                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12581
12582                 if (!phy_validate_pause(phydev, epause))
12583                         return -EINVAL;
12584
12585                 tp->link_config.flowctrl = 0;
12586                 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12587                 if (epause->rx_pause) {
12588                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12589
12590                         if (epause->tx_pause) {
12591                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12592                         }
12593                 } else if (epause->tx_pause) {
12594                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12595                 }
12596
12597                 if (epause->autoneg)
12598                         tg3_flag_set(tp, PAUSE_AUTONEG);
12599                 else
12600                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12601
12602                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12603                         if (phydev->autoneg) {
12604                                 /* phy_set_asym_pause() will
12605                                  * renegotiate the link to inform our
12606                                  * link partner of our flow control
12607                                  * settings, even if the flow control
12608                                  * is forced.  Let tg3_adjust_link()
12609                                  * do the final flow control setup.
12610                                  */
12611                                 return 0;
12612                         }
12613
12614                         if (!epause->autoneg)
12615                                 tg3_setup_flow_control(tp, 0, 0);
12616                 }
12617         } else {
12618                 int irq_sync = 0;
12619
12620                 if (netif_running(dev)) {
12621                         tg3_netif_stop(tp);
12622                         irq_sync = 1;
12623                 }
12624
12625                 tg3_full_lock(tp, irq_sync);
12626
12627                 if (epause->autoneg)
12628                         tg3_flag_set(tp, PAUSE_AUTONEG);
12629                 else
12630                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12631                 if (epause->rx_pause)
12632                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12633                 else
12634                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12635                 if (epause->tx_pause)
12636                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12637                 else
12638                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12639
12640                 if (netif_running(dev)) {
12641                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12642                         /* Reset PHY to avoid PHY lock up */
12643                         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12644                             tg3_asic_rev(tp) == ASIC_REV_5719 ||
12645                             tg3_asic_rev(tp) == ASIC_REV_5720)
12646                                 reset_phy = true;
12647
12648                         err = tg3_restart_hw(tp, reset_phy);
12649                         if (!err)
12650                                 tg3_netif_start(tp);
12651                 }
12652
12653                 tg3_full_unlock(tp);
12654         }
12655
12656         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12657
12658         return err;
12659 }
12660
12661 static int tg3_get_sset_count(struct net_device *dev, int sset)
12662 {
12663         switch (sset) {
12664         case ETH_SS_TEST:
12665                 return TG3_NUM_TEST;
12666         case ETH_SS_STATS:
12667                 return TG3_NUM_STATS;
12668         default:
12669                 return -EOPNOTSUPP;
12670         }
12671 }
12672
12673 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12674                          u32 *rules __always_unused)
12675 {
12676         struct tg3 *tp = netdev_priv(dev);
12677
12678         if (!tg3_flag(tp, SUPPORT_MSIX))
12679                 return -EOPNOTSUPP;
12680
12681         switch (info->cmd) {
12682         case ETHTOOL_GRXRINGS:
12683                 if (netif_running(tp->dev))
12684                         info->data = tp->rxq_cnt;
12685                 else {
12686                         info->data = num_online_cpus();
12687                         if (info->data > TG3_RSS_MAX_NUM_QS)
12688                                 info->data = TG3_RSS_MAX_NUM_QS;
12689                 }
12690
12691                 return 0;
12692
12693         default:
12694                 return -EOPNOTSUPP;
12695         }
12696 }
12697
12698 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12699 {
12700         u32 size = 0;
12701         struct tg3 *tp = netdev_priv(dev);
12702
12703         if (tg3_flag(tp, SUPPORT_MSIX))
12704                 size = TG3_RSS_INDIR_TBL_SIZE;
12705
12706         return size;
12707 }
12708
12709 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12710 {
12711         struct tg3 *tp = netdev_priv(dev);
12712         int i;
12713
12714         if (hfunc)
12715                 *hfunc = ETH_RSS_HASH_TOP;
12716         if (!indir)
12717                 return 0;
12718
12719         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12720                 indir[i] = tp->rss_ind_tbl[i];
12721
12722         return 0;
12723 }
12724
12725 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12726                         const u8 hfunc)
12727 {
12728         struct tg3 *tp = netdev_priv(dev);
12729         size_t i;
12730
12731         /* Only the indirection table can be changed; setting a hash key
12732          * or any hash function other than Toeplitz is unsupported.
12733          */
12734         if (key ||
12735             (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12736                 return -EOPNOTSUPP;
12737
12738         if (!indir)
12739                 return 0;
12740
12741         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12742                 tp->rss_ind_tbl[i] = indir[i];
12743
12744         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12745                 return 0;
12746
12747         /* It is legal to write the indirection
12748          * table while the device is running.
12749          */
12750         tg3_full_lock(tp, 0);
12751         tg3_rss_write_indir_tbl(tp);
12752         tg3_full_unlock(tp);
12753
12754         return 0;
12755 }
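
/* Both RXFH handlers above hook into the standard ethtool RSS
 * interface; e.g. (device name illustrative):
 *   ethtool -x eth0          reads the table via tg3_get_rxfh()
 *   ethtool -X eth0 equal 4  rewrites it via tg3_set_rxfh()
 */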
12756
12757 static void tg3_get_channels(struct net_device *dev,
12758                              struct ethtool_channels *channel)
12759 {
12760         struct tg3 *tp = netdev_priv(dev);
12761         u32 deflt_qs = netif_get_num_default_rss_queues();
12762
12763         channel->max_rx = tp->rxq_max;
12764         channel->max_tx = tp->txq_max;
12765
12766         if (netif_running(dev)) {
12767                 channel->rx_count = tp->rxq_cnt;
12768                 channel->tx_count = tp->txq_cnt;
12769         } else {
12770                 if (tp->rxq_req)
12771                         channel->rx_count = tp->rxq_req;
12772                 else
12773                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12774
12775                 if (tp->txq_req)
12776                         channel->tx_count = tp->txq_req;
12777                 else
12778                         channel->tx_count = min(deflt_qs, tp->txq_max);
12779         }
12780 }
12781
12782 static int tg3_set_channels(struct net_device *dev,
12783                             struct ethtool_channels *channel)
12784 {
12785         struct tg3 *tp = netdev_priv(dev);
12786
12787         if (!tg3_flag(tp, SUPPORT_MSIX))
12788                 return -EOPNOTSUPP;
12789
12790         if (channel->rx_count > tp->rxq_max ||
12791             channel->tx_count > tp->txq_max)
12792                 return -EINVAL;
12793
12794         tp->rxq_req = channel->rx_count;
12795         tp->txq_req = channel->tx_count;
12796
12797         if (!netif_running(dev))
12798                 return 0;
12799
12800         tg3_stop(tp);
12801
12802         tg3_carrier_off(tp);
12803
12804         tg3_start(tp, true, false, false);
12805
12806         return 0;
12807 }
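
/* The channel counts map onto the standard ethtool channels API; e.g.
 * `ethtool -l eth0` reads them and `ethtool -L eth0 rx 2 tx 2` requests
 * a new configuration (device name illustrative).  A running device is
 * fully stopped and restarted to apply the change.
 */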
12808
12809 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12810 {
12811         switch (stringset) {
12812         case ETH_SS_STATS:
12813                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12814                 break;
12815         case ETH_SS_TEST:
12816                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12817                 break;
12818         default:
12819                 WARN_ON(1);     /* unknown stringset; should never be reached */
12820                 break;
12821         }
12822 }
12823
12824 static int tg3_set_phys_id(struct net_device *dev,
12825                             enum ethtool_phys_id_state state)
12826 {
12827         struct tg3 *tp = netdev_priv(dev);
12828
12829         switch (state) {
12830         case ETHTOOL_ID_ACTIVE:
12831                 return 1;       /* cycle on/off once per second */
12832
12833         case ETHTOOL_ID_ON:
12834                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12835                      LED_CTRL_1000MBPS_ON |
12836                      LED_CTRL_100MBPS_ON |
12837                      LED_CTRL_10MBPS_ON |
12838                      LED_CTRL_TRAFFIC_OVERRIDE |
12839                      LED_CTRL_TRAFFIC_BLINK |
12840                      LED_CTRL_TRAFFIC_LED);
12841                 break;
12842
12843         case ETHTOOL_ID_OFF:
12844                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12845                      LED_CTRL_TRAFFIC_OVERRIDE);
12846                 break;
12847
12848         case ETHTOOL_ID_INACTIVE:
12849                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12850                 break;
12851         }
12852
12853         return 0;
12854 }
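
/* tg3_set_phys_id() backs `ethtool -p` port identification, e.g.
 * `ethtool -p eth0 5` (device name illustrative): returning 1 from
 * ETHTOOL_ID_ACTIVE asks the core to cycle the LED on/off once per
 * second, and ETHTOOL_ID_INACTIVE restores the saved LED state.
 */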
12855
12856 static void tg3_get_ethtool_stats(struct net_device *dev,
12857                                    struct ethtool_stats *estats, u64 *tmp_stats)
12858 {
12859         struct tg3 *tp = netdev_priv(dev);
12860
12861         if (tp->hw_stats)
12862                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12863         else
12864                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12865 }
12866
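/* Locate and read the VPD block: on EEPROM-style parts, walk the NVRAM
 * directory for an extended-VPD entry (falling back to the fixed
 * TG3_NVM_VPD_OFF/TG3_NVM_VPD_LEN window); otherwise use the generic
 * PCI VPD capability via pci_vpd_alloc().
 */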
12867 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12868 {
12869         int i;
12870         __be32 *buf;
12871         u32 offset = 0, len = 0;
12872         u32 magic, val;
12873
12874         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12875                 return NULL;
12876
12877         if (magic == TG3_EEPROM_MAGIC) {
12878                 for (offset = TG3_NVM_DIR_START;
12879                      offset < TG3_NVM_DIR_END;
12880                      offset += TG3_NVM_DIRENT_SIZE) {
12881                         if (tg3_nvram_read(tp, offset, &val))
12882                                 return NULL;
12883
12884                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12885                             TG3_NVM_DIRTYPE_EXTVPD)
12886                                 break;
12887                 }
12888
12889                 if (offset != TG3_NVM_DIR_END) {
12890                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12891                         if (tg3_nvram_read(tp, offset + 4, &offset))
12892                                 return NULL;
12893
12894                         offset = tg3_nvram_logical_addr(tp, offset);
12895                 }
12896
12897                 if (!offset || !len) {
12898                         offset = TG3_NVM_VPD_OFF;
12899                         len = TG3_NVM_VPD_LEN;
12900                 }
12901
12902                 buf = kmalloc(len, GFP_KERNEL);
12903                 if (!buf)
12904                         return NULL;
12905
12906                 for (i = 0; i < len; i += 4) {
12907                         /* The data is in little-endian format in NVRAM.
12908                          * Use the big-endian read routines to preserve
12909                          * the byte order as it exists in NVRAM.
12910                          */
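                              /* e.g. NVRAM bytes 12 34 56 78 land in
                               * buf[] in that same order, since each
                               * word is stored as a __be32.
                               */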
12911                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12912                                 goto error;
12913                 }
12914                 *vpdlen = len;
12915         } else {
12916                 buf = pci_vpd_alloc(tp->pdev, vpdlen);
12917                 if (IS_ERR(buf))
12918                         return NULL;
12919         }
12920
12921         return buf;
12922
12923 error:
12924         kfree(buf);
12925         return NULL;
12926 }
12927
12928 #define NVRAM_TEST_SIZE 0x100
12929 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12930 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12931 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12932 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12933 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12934 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12935 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12936 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12937
12938 static int tg3_test_nvram(struct tg3 *tp)
12939 {
12940         u32 csum, magic;
12941         __be32 *buf;
12942         int i, j, k, err = 0, size;
12943         unsigned int len;
12944
12945         if (tg3_flag(tp, NO_NVRAM))
12946                 return 0;
12947
12948         if (tg3_nvram_read(tp, 0, &magic) != 0)
12949                 return -EIO;
12950
12951         if (magic == TG3_EEPROM_MAGIC)
12952                 size = NVRAM_TEST_SIZE;
12953         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12954                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12955                     TG3_EEPROM_SB_FORMAT_1) {
12956                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12957                         case TG3_EEPROM_SB_REVISION_0:
12958                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12959                                 break;
12960                         case TG3_EEPROM_SB_REVISION_2:
12961                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12962                                 break;
12963                         case TG3_EEPROM_SB_REVISION_3:
12964                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12965                                 break;
12966                         case TG3_EEPROM_SB_REVISION_4:
12967                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12968                                 break;
12969                         case TG3_EEPROM_SB_REVISION_5:
12970                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12971                                 break;
12972                         case TG3_EEPROM_SB_REVISION_6:
12973                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12974                                 break;
12975                         default:
12976                                 return -EIO;
12977                         }
12978                 } else
12979                         return 0;
12980         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12981                 size = NVRAM_SELFBOOT_HW_SIZE;
12982         else
12983                 return -EIO;
12984
12985         buf = kmalloc(size, GFP_KERNEL);
12986         if (buf == NULL)
12987                 return -ENOMEM;
12988
12989         err = -EIO;
12990         for (i = 0, j = 0; i < size; i += 4, j++) {
12991                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12992                 if (err)
12993                         break;
12994         }
12995         if (i < size)
12996                 goto out;
12997
12998         /* Selfboot format */
12999         magic = be32_to_cpu(buf[0]);
13000         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
13001             TG3_EEPROM_MAGIC_FW) {
13002                 u8 *buf8 = (u8 *) buf, csum8 = 0;
13003
13004                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
13005                     TG3_EEPROM_SB_REVISION_2) {
13006                         /* For rev 2, the csum doesn't include the MBA. */
13007                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
13008                                 csum8 += buf8[i];
13009                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
13010                                 csum8 += buf8[i];
13011                 } else {
13012                         for (i = 0; i < size; i++)
13013                                 csum8 += buf8[i];
13014                 }
13015
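                      /* A valid self-boot image sums to zero mod 256:
                       * it carries a byte chosen so that adding every
                       * byte (skipping the MBA word on rev 2) yields
                       * csum8 == 0.
                       */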
13016                 if (csum8 == 0) {
13017                         err = 0;
13018                         goto out;
13019                 }
13020
13021                 err = -EIO;
13022                 goto out;
13023         }
13024
13025         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
13026             TG3_EEPROM_MAGIC_HW) {
13027                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
13028                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
13029                 u8 *buf8 = (u8 *) buf;
13030
13031                 /* Separate the parity bits and the data bytes.  */
13032                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
13033                         if ((i == 0) || (i == 8)) {
13034                                 int l;
13035                                 u8 msk;
13036
13037                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
13038                                         parity[k++] = buf8[i] & msk;
13039                                 i++;
13040                         } else if (i == 16) {
13041                                 int l;
13042                                 u8 msk;
13043
13044                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
13045                                         parity[k++] = buf8[i] & msk;
13046                                 i++;
13047
13048                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
13049                                         parity[k++] = buf8[i] & msk;
13050                                 i++;
13051                         }
13052                         data[j++] = buf8[i];
13053                 }
13054
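                      /* Each of the 28 data bytes must form an odd-parity
                       * 9-bit group with its parity bit: e.g. data byte
                       * 0x03 (two bits set) is only valid with its parity
                       * bit set.
                       */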
13055                 err = -EIO;
13056                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13057                         u8 hw8 = hweight8(data[i]);
13058
13059                         if ((hw8 & 0x1) && parity[i])
13060                                 goto out;
13061                         else if (!(hw8 & 0x1) && !parity[i])
13062                                 goto out;
13063                 }
13064                 err = 0;
13065                 goto out;
13066         }
13067
13068         err = -EIO;
13069
13070         /* Bootstrap checksum at offset 0x10 */
13071         csum = calc_crc((unsigned char *) buf, 0x10);
13072         if (csum != le32_to_cpu(buf[0x10/4]))
13073                 goto out;
13074
13075         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13076         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13077         if (csum != le32_to_cpu(buf[0xfc/4]))
13078                 goto out;
13079
13080         kfree(buf);
13081
13082         buf = tg3_vpd_readblock(tp, &len);
13083         if (!buf)
13084                 return -ENOMEM;
13085
13086         err = pci_vpd_check_csum(buf, len);
13087         /* a return of 1 means no checksum was found; not a failure */
13088         if (err == 1)
13089                 err = 0;
13090 out:
13091         kfree(buf);
13092         return err;
13093 }
13094
13095 #define TG3_SERDES_TIMEOUT_SEC  2
13096 #define TG3_COPPER_TIMEOUT_SEC  6
13097
13098 static int tg3_test_link(struct tg3 *tp)
13099 {
13100         int i, max;
13101
13102         if (!netif_running(tp->dev))
13103                 return -ENODEV;
13104
13105         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13106                 max = TG3_SERDES_TIMEOUT_SEC;
13107         else
13108                 max = TG3_COPPER_TIMEOUT_SEC;
13109
13110         for (i = 0; i < max; i++) {
13111                 if (tp->link_up)
13112                         return 0;
13113
13114                 if (msleep_interruptible(1000))
13115                         break;
13116         }
13117
13118         return -EIO;
13119 }
13120
13121 /* Only test the commonly used registers */
13122 static int tg3_test_registers(struct tg3 *tp)
13123 {
13124         int i, is_5705, is_5750;
13125         u32 offset, read_mask, write_mask, val, save_val, read_val;
13126         static struct {
13127                 u16 offset;
13128                 u16 flags;
13129 #define TG3_FL_5705     0x1
13130 #define TG3_FL_NOT_5705 0x2
13131 #define TG3_FL_NOT_5788 0x4
13132 #define TG3_FL_NOT_5750 0x8
13133                 u32 read_mask;
13134                 u32 write_mask;
13135         } reg_tbl[] = {
13136                 /* MAC Control Registers */
13137                 { MAC_MODE, TG3_FL_NOT_5705,
13138                         0x00000000, 0x00ef6f8c },
13139                 { MAC_MODE, TG3_FL_5705,
13140                         0x00000000, 0x01ef6b8c },
13141                 { MAC_STATUS, TG3_FL_NOT_5705,
13142                         0x03800107, 0x00000000 },
13143                 { MAC_STATUS, TG3_FL_5705,
13144                         0x03800100, 0x00000000 },
13145                 { MAC_ADDR_0_HIGH, 0x0000,
13146                         0x00000000, 0x0000ffff },
13147                 { MAC_ADDR_0_LOW, 0x0000,
13148                         0x00000000, 0xffffffff },
13149                 { MAC_RX_MTU_SIZE, 0x0000,
13150                         0x00000000, 0x0000ffff },
13151                 { MAC_TX_MODE, 0x0000,
13152                         0x00000000, 0x00000070 },
13153                 { MAC_TX_LENGTHS, 0x0000,
13154                         0x00000000, 0x00003fff },
13155                 { MAC_RX_MODE, TG3_FL_NOT_5705,
13156                         0x00000000, 0x000007fc },
13157                 { MAC_RX_MODE, TG3_FL_5705,
13158                         0x00000000, 0x000007dc },
13159                 { MAC_HASH_REG_0, 0x0000,
13160                         0x00000000, 0xffffffff },
13161                 { MAC_HASH_REG_1, 0x0000,
13162                         0x00000000, 0xffffffff },
13163                 { MAC_HASH_REG_2, 0x0000,
13164                         0x00000000, 0xffffffff },
13165                 { MAC_HASH_REG_3, 0x0000,
13166                         0x00000000, 0xffffffff },
13167
13168                 /* Receive Data and Receive BD Initiator Control Registers. */
13169                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13170                         0x00000000, 0xffffffff },
13171                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13172                         0x00000000, 0xffffffff },
13173                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13174                         0x00000000, 0x00000003 },
13175                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13176                         0x00000000, 0xffffffff },
13177                 { RCVDBDI_STD_BD+0, 0x0000,
13178                         0x00000000, 0xffffffff },
13179                 { RCVDBDI_STD_BD+4, 0x0000,
13180                         0x00000000, 0xffffffff },
13181                 { RCVDBDI_STD_BD+8, 0x0000,
13182                         0x00000000, 0xffff0002 },
13183                 { RCVDBDI_STD_BD+0xc, 0x0000,
13184                         0x00000000, 0xffffffff },
13185
13186                 /* Receive BD Initiator Control Registers. */
13187                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13188                         0x00000000, 0xffffffff },
13189                 { RCVBDI_STD_THRESH, TG3_FL_5705,
13190                         0x00000000, 0x000003ff },
13191                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13192                         0x00000000, 0xffffffff },
13193
13194                 /* Host Coalescing Control Registers. */
13195                 { HOSTCC_MODE, TG3_FL_NOT_5705,
13196                         0x00000000, 0x00000004 },
13197                 { HOSTCC_MODE, TG3_FL_5705,
13198                         0x00000000, 0x000000f6 },
13199                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13200                         0x00000000, 0xffffffff },
13201                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13202                         0x00000000, 0x000003ff },
13203                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13204                         0x00000000, 0xffffffff },
13205                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13206                         0x00000000, 0x000003ff },
13207                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13208                         0x00000000, 0xffffffff },
13209                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13210                         0x00000000, 0x000000ff },
13211                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13212                         0x00000000, 0xffffffff },
13213                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13214                         0x00000000, 0x000000ff },
13215                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13216                         0x00000000, 0xffffffff },
13217                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13218                         0x00000000, 0xffffffff },
13219                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13220                         0x00000000, 0xffffffff },
13221                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13222                         0x00000000, 0x000000ff },
13223                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13224                         0x00000000, 0xffffffff },
13225                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13226                         0x00000000, 0x000000ff },
13227                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13228                         0x00000000, 0xffffffff },
13229                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13230                         0x00000000, 0xffffffff },
13231                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13232                         0x00000000, 0xffffffff },
13233                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13234                         0x00000000, 0xffffffff },
13235                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13236                         0x00000000, 0xffffffff },
13237                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13238                         0xffffffff, 0x00000000 },
13239                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13240                         0xffffffff, 0x00000000 },
13241
13242                 /* Buffer Manager Control Registers. */
13243                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13244                         0x00000000, 0x007fff80 },
13245                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13246                         0x00000000, 0x007fffff },
13247                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13248                         0x00000000, 0x0000003f },
13249                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13250                         0x00000000, 0x000001ff },
13251                 { BUFMGR_MB_HIGH_WATER, 0x0000,
13252                         0x00000000, 0x000001ff },
13253                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13254                         0xffffffff, 0x00000000 },
13255                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13256                         0xffffffff, 0x00000000 },
13257
13258                 /* Mailbox Registers */
13259                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13260                         0x00000000, 0x000001ff },
13261                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13262                         0x00000000, 0x000001ff },
13263                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13264                         0x00000000, 0x000007ff },
13265                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13266                         0x00000000, 0x000001ff },
13267
13268                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13269         };
13270
13271         is_5705 = is_5750 = 0;
13272         if (tg3_flag(tp, 5705_PLUS)) {
13273                 is_5705 = 1;
13274                 if (tg3_flag(tp, 5750_PLUS))
13275                         is_5750 = 1;
13276         }
13277
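              /* Walk the table until the 0xffff sentinel, skipping entries
               * whose flags exclude the current chip; e.g. MAC_ADDR_0_LOW
               * (write_mask 0xffffffff) must echo any 32-bit pattern, while
               * HOSTCC_STATS_BLK_NIC_ADDR (read_mask 0xffffffff) must be
               * unaffected by writes.
               */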
13278         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13279                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13280                         continue;
13281
13282                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13283                         continue;
13284
13285                 if (tg3_flag(tp, IS_5788) &&
13286                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
13287                         continue;
13288
13289                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13290                         continue;
13291
13292                 offset = (u32) reg_tbl[i].offset;
13293                 read_mask = reg_tbl[i].read_mask;
13294                 write_mask = reg_tbl[i].write_mask;
13295
13296                 /* Save the original register content */
13297                 save_val = tr32(offset);
13298
13299                 /* Determine the read-only value. */
13300                 read_val = save_val & read_mask;
13301
13302                 /* Write zero to the register, then make sure the read-only bits
13303                  * are not changed and the read/write bits are all zeros.
13304                  */
13305                 tw32(offset, 0);
13306
13307                 val = tr32(offset);
13308
13309                 /* Test the read-only and read/write bits. */
13310                 if (((val & read_mask) != read_val) || (val & write_mask))
13311                         goto out;
13312
13313                 /* Write ones to all the bits defined by RdMask and WrMask, then
13314                  * make sure the read-only bits are not changed and the
13315                  * read/write bits are all ones.
13316                  */
13317                 tw32(offset, read_mask | write_mask);
13318
13319                 val = tr32(offset);
13320
13321                 /* Test the read-only bits. */
13322                 if ((val & read_mask) != read_val)
13323                         goto out;
13324
13325                 /* Test the read/write bits. */
13326                 if ((val & write_mask) != write_mask)
13327                         goto out;
13328
13329                 tw32(offset, save_val);
13330         }
13331
13332         return 0;
13333
13334 out:
13335         if (netif_msg_hw(tp))
13336                 netdev_err(tp->dev,
13337                            "Register test failed at offset %x\n", offset);
13338         tw32(offset, save_val);
13339         return -EIO;
13340 }
13341
13342 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13343 {
13344         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13345         int i;
13346         u32 j;
13347
13348         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13349                 for (j = 0; j < len; j += 4) {
13350                         u32 val;
13351
13352                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13353                         tg3_read_mem(tp, offset + j, &val);
13354                         if (val != test_pattern[i])
13355                                 return -EIO;
13356                 }
13357         }
13358         return 0;
13359 }
13360
13361 static int tg3_test_memory(struct tg3 *tp)
13362 {
13363         static struct mem_entry {
13364                 u32 offset;
13365                 u32 len;
13366         } mem_tbl_570x[] = {
13367                 { 0x00000000, 0x00b50},
13368                 { 0x00002000, 0x1c000},
13369                 { 0xffffffff, 0x00000}
13370         }, mem_tbl_5705[] = {
13371                 { 0x00000100, 0x0000c},
13372                 { 0x00000200, 0x00008},
13373                 { 0x00004000, 0x00800},
13374                 { 0x00006000, 0x01000},
13375                 { 0x00008000, 0x02000},
13376                 { 0x00010000, 0x0e000},
13377                 { 0xffffffff, 0x00000}
13378         }, mem_tbl_5755[] = {
13379                 { 0x00000200, 0x00008},
13380                 { 0x00004000, 0x00800},
13381                 { 0x00006000, 0x00800},
13382                 { 0x00008000, 0x02000},
13383                 { 0x00010000, 0x0c000},
13384                 { 0xffffffff, 0x00000}
13385         }, mem_tbl_5906[] = {
13386                 { 0x00000200, 0x00008},
13387                 { 0x00004000, 0x00400},
13388                 { 0x00006000, 0x00400},
13389                 { 0x00008000, 0x01000},
13390                 { 0x00010000, 0x01000},
13391                 { 0xffffffff, 0x00000}
13392         }, mem_tbl_5717[] = {
13393                 { 0x00000200, 0x00008},
13394                 { 0x00010000, 0x0a000},
13395                 { 0x00020000, 0x13c00},
13396                 { 0xffffffff, 0x00000}
13397         }, mem_tbl_57765[] = {
13398                 { 0x00000200, 0x00008},
13399                 { 0x00004000, 0x00800},
13400                 { 0x00006000, 0x09800},
13401                 { 0x00010000, 0x0a000},
13402                 { 0xffffffff, 0x00000}
13403         };
13404         struct mem_entry *mem_tbl;
13405         int err = 0;
13406         int i;
13407
13408         if (tg3_flag(tp, 5717_PLUS))
13409                 mem_tbl = mem_tbl_5717;
13410         else if (tg3_flag(tp, 57765_CLASS) ||
13411                  tg3_asic_rev(tp) == ASIC_REV_5762)
13412                 mem_tbl = mem_tbl_57765;
13413         else if (tg3_flag(tp, 5755_PLUS))
13414                 mem_tbl = mem_tbl_5755;
13415         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13416                 mem_tbl = mem_tbl_5906;
13417         else if (tg3_flag(tp, 5705_PLUS))
13418                 mem_tbl = mem_tbl_5705;
13419         else
13420                 mem_tbl = mem_tbl_570x;
13421
13422         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13423                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13424                 if (err)
13425                         break;
13426         }
13427
13428         return err;
13429 }
13430
13431 #define TG3_TSO_MSS             500
13432
13433 #define TG3_TSO_IP_HDR_LEN      20
13434 #define TG3_TSO_TCP_HDR_LEN     20
13435 #define TG3_TSO_TCP_OPT_LEN     12
13436
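/* Canned loopback header: ethertype 0x0800 followed by an IPv4 header
 * (10.0.0.1 -> 10.0.0.2, DF set, TTL 64, protocol TCP, tot_len patched
 * at run time) and a TCP header with data offset 8, i.e. a 20-byte
 * header plus the 12-byte timestamp option accounted for by
 * TG3_TSO_TCP_OPT_LEN above.
 */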
13437 static const u8 tg3_tso_header[] = {
13438 0x08, 0x00,
13439 0x45, 0x00, 0x00, 0x00,
13440 0x00, 0x00, 0x40, 0x00,
13441 0x40, 0x06, 0x00, 0x00,
13442 0x0a, 0x00, 0x00, 0x01,
13443 0x0a, 0x00, 0x00, 0x02,
13444 0x0d, 0x00, 0xe0, 0x00,
13445 0x00, 0x00, 0x01, 0x00,
13446 0x00, 0x00, 0x02, 0x00,
13447 0x80, 0x10, 0x10, 0x00,
13448 0x14, 0x09, 0x00, 0x00,
13449 0x01, 0x01, 0x08, 0x0a,
13450 0x11, 0x11, 0x11, 0x11,
13451 0x11, 0x11, 0x11, 0x11,
13452 };
13453
13454 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13455 {
13456         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13457         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13458         u32 budget;
13459         struct sk_buff *skb;
13460         u8 *tx_data, *rx_data;
13461         dma_addr_t map;
13462         int num_pkts, tx_len, rx_len, i, err;
13463         struct tg3_rx_buffer_desc *desc;
13464         struct tg3_napi *tnapi, *rnapi;
13465         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13466
13467         tnapi = &tp->napi[0];
13468         rnapi = &tp->napi[0];
13469         if (tp->irq_cnt > 1) {
13470                 if (tg3_flag(tp, ENABLE_RSS))
13471                         rnapi = &tp->napi[1];
13472                 if (tg3_flag(tp, ENABLE_TSS))
13473                         tnapi = &tp->napi[1];
13474         }
13475         coal_now = tnapi->coal_now | rnapi->coal_now;
13476
13477         err = -EIO;
13478
13479         tx_len = pktsz;
13480         skb = netdev_alloc_skb(tp->dev, tx_len);
13481         if (!skb)
13482                 return -ENOMEM;
13483
13484         tx_data = skb_put(skb, tx_len);
13485         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13486         memset(tx_data + ETH_ALEN, 0x0, 8);
13487
13488         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13489
13490         if (tso_loopback) {
13491                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13492
13493                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13494                               TG3_TSO_TCP_OPT_LEN;
13495
13496                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13497                        sizeof(tg3_tso_header));
13498                 mss = TG3_TSO_MSS;
13499
13500                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13501                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
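                      /* e.g. for the ETH_FRAME_LEN (1514 byte) TSO run
                       * below: 1514 - 12 - 54 header bytes = 1448 payload
                       * bytes, so num_pkts = DIV_ROUND_UP(1448, 500) = 3
                       * segments.
                       */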
13502
13503                 /* Set the total length field in the IP header */
13504                 iph->tot_len = htons((u16)(mss + hdr_len));
13505
13506                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13507                               TXD_FLAG_CPU_POST_DMA);
13508
13509                 if (tg3_flag(tp, HW_TSO_1) ||
13510                     tg3_flag(tp, HW_TSO_2) ||
13511                     tg3_flag(tp, HW_TSO_3)) {
13512                         struct tcphdr *th;
13513                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13514                         th = (struct tcphdr *)&tx_data[val];
13515                         th->check = 0;
13516                 } else
13517                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13518
13519                 if (tg3_flag(tp, HW_TSO_3)) {
13520                         mss |= (hdr_len & 0xc) << 12;
13521                         if (hdr_len & 0x10)
13522                                 base_flags |= 0x00000010;
13523                         base_flags |= (hdr_len & 0x3e0) << 5;
13524                 } else if (tg3_flag(tp, HW_TSO_2)) {
13525                         mss |= hdr_len << 9;
13526                 } else if (tg3_flag(tp, HW_TSO_1) ||
13527                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13528                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13529                 } else {
13530                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13531                 }
13532
13533                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13534         } else {
13535                 num_pkts = 1;
13536                 data_off = ETH_HLEN;
13537
13538                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13539                     tx_len > VLAN_ETH_FRAME_LEN)
13540                         base_flags |= TXD_FLAG_JMB_PKT;
13541         }
13542
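              /* Fill the payload with a rolling 0x00..0xff byte pattern;
               * the receive side of this test verifies the same sequence
               * byte-by-byte.
               */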
13543         for (i = data_off; i < tx_len; i++)
13544                 tx_data[i] = (u8) (i & 0xff);
13545
13546         map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13547         if (dma_mapping_error(&tp->pdev->dev, map)) {
13548                 dev_kfree_skb(skb);
13549                 return -EIO;
13550         }
13551
13552         val = tnapi->tx_prod;
13553         tnapi->tx_buffers[val].skb = skb;
13554         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13555
13556         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13557                rnapi->coal_now);
13558
13559         udelay(10);
13560
13561         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13562
13563         budget = tg3_tx_avail(tnapi);
13564         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13565                             base_flags | TXD_FLAG_END, mss, 0)) {
13566                 tnapi->tx_buffers[val].skb = NULL;
13567                 dev_kfree_skb(skb);
13568                 return -EIO;
13569         }
13570
13571         tnapi->tx_prod++;
13572
13573         /* Sync BD data before updating mailbox */
13574         wmb();
13575
13576         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13577         tr32_mailbox(tnapi->prodmbox);
13578
13579         udelay(10);
13580
13581         /* Poll up to 35 * 10 usec = 350 usec to allow enough time on some 10/100 Mbps devices.  */
13582         for (i = 0; i < 35; i++) {
13583                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13584                        coal_now);
13585
13586                 udelay(10);
13587
13588                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13589                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13590                 if ((tx_idx == tnapi->tx_prod) &&
13591                     (rx_idx == (rx_start_idx + num_pkts)))
13592                         break;
13593         }
13594
13595         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13596         dev_kfree_skb(skb);
13597
13598         if (tx_idx != tnapi->tx_prod)
13599                 goto out;
13600
13601         if (rx_idx != rx_start_idx + num_pkts)
13602                 goto out;
13603
13604         val = data_off;
13605         while (rx_idx != rx_start_idx) {
13606                 desc = &rnapi->rx_rcb[rx_start_idx++];
13607                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13608                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13609
13610                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13611                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13612                         goto out;
13613
13614                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13615                          - ETH_FCS_LEN;
13616
13617                 if (!tso_loopback) {
13618                         if (rx_len != tx_len)
13619                                 goto out;
13620
13621                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13622                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13623                                         goto out;
13624                         } else {
13625                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13626                                         goto out;
13627                         }
13628                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13629                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13630                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13631                         goto out;
13632                 }
13633
13634                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13635                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13636                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13637                                              mapping);
13638                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13639                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13640                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13641                                              mapping);
13642                 } else
13643                         goto out;
13644
13645                 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13646                                         DMA_FROM_DEVICE);
13647
13648                 rx_data += TG3_RX_OFFSET(tp);
13649                 for (i = data_off; i < rx_len; i++, val++) {
13650                         if (*(rx_data + i) != (u8) (val & 0xff))
13651                                 goto out;
13652                 }
13653         }
13654
13655         err = 0;
13656
13657         /* tg3_free_rings will unmap and free the rx_data */
13658 out:
13659         return err;
13660 }
13661
13662 #define TG3_STD_LOOPBACK_FAILED         1
13663 #define TG3_JMB_LOOPBACK_FAILED         2
13664 #define TG3_TSO_LOOPBACK_FAILED         4
13665 #define TG3_LOOPBACK_FAILED \
13666         (TG3_STD_LOOPBACK_FAILED | \
13667          TG3_JMB_LOOPBACK_FAILED | \
13668          TG3_TSO_LOOPBACK_FAILED)
13669
13670 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13671 {
13672         int err = -EIO;
13673         u32 eee_cap;
13674         u32 jmb_pkt_sz = 9000;
13675
13676         if (tp->dma_limit)
13677                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13678
13679         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13680         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13681
13682         if (!netif_running(tp->dev)) {
13683                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13684                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13685                 if (do_extlpbk)
13686                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13687                 goto done;
13688         }
13689
13690         err = tg3_reset_hw(tp, true);
13691         if (err) {
13692                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13693                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13694                 if (do_extlpbk)
13695                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13696                 goto done;
13697         }
13698
13699         if (tg3_flag(tp, ENABLE_RSS)) {
13700                 int i;
13701
13702                 /* Reroute all rx packets to the 1st queue */
13703                 for (i = MAC_RSS_INDIR_TBL_0;
13704                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13705                         tw32(i, 0x0);
13706         }
13707
13708         /* HW errata - MAC loopback fails in some cases on 5780.
13709          * Normal traffic and PHY loopback are not affected by
13710          * this errata.  Also, the MAC loopback test is deprecated
13711          * for all newer ASIC revisions.
13712          */
13713         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13714             !tg3_flag(tp, CPMU_PRESENT)) {
13715                 tg3_mac_loopback(tp, true);
13716
13717                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13718                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13719
13720                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13721                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13722                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13723
13724                 tg3_mac_loopback(tp, false);
13725         }
13726
13727         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13728             !tg3_flag(tp, USE_PHYLIB)) {
13729                 int i;
13730
13731                 tg3_phy_lpbk_set(tp, 0, false);
13732
13733                 /* Wait for link */
13734                 for (i = 0; i < 100; i++) {
13735                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13736                                 break;
13737                         mdelay(1);
13738                 }
13739
13740                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13741                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13742                 if (tg3_flag(tp, TSO_CAPABLE) &&
13743                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13744                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13745                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13746                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13747                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13748
13749                 if (do_extlpbk) {
13750                         tg3_phy_lpbk_set(tp, 0, true);
13751
13752                         /* All link indications report up, but the hardware
13753                          * isn't really ready for about 20 msec.  Double it
13754                          * to be sure.
13755                          */
13756                         mdelay(40);
13757
13758                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13759                                 data[TG3_EXT_LOOPB_TEST] |=
13760                                                         TG3_STD_LOOPBACK_FAILED;
13761                         if (tg3_flag(tp, TSO_CAPABLE) &&
13762                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13763                                 data[TG3_EXT_LOOPB_TEST] |=
13764                                                         TG3_TSO_LOOPBACK_FAILED;
13765                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13766                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13767                                 data[TG3_EXT_LOOPB_TEST] |=
13768                                                         TG3_JMB_LOOPBACK_FAILED;
13769                 }
13770
13771                 /* Re-enable gphy autopowerdown. */
13772                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13773                         tg3_phy_toggle_apd(tp, true);
13774         }
13775
13776         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13777                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13778
13779 done:
13780         tp->phy_flags |= eee_cap;
13781
13782         return err;
13783 }
13784
13785 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13786                           u64 *data)
13787 {
13788         struct tg3 *tp = netdev_priv(dev);
13789         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13790
13791         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13792                 if (tg3_power_up(tp)) {
13793                         etest->flags |= ETH_TEST_FL_FAILED;
13794                         memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13795                         return;
13796                 }
13797                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13798         }
13799
13800         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13801
13802         if (tg3_test_nvram(tp) != 0) {
13803                 etest->flags |= ETH_TEST_FL_FAILED;
13804                 data[TG3_NVRAM_TEST] = 1;
13805         }
13806         if (!doextlpbk && tg3_test_link(tp)) {
13807                 etest->flags |= ETH_TEST_FL_FAILED;
13808                 data[TG3_LINK_TEST] = 1;
13809         }
13810         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13811                 int err, err2 = 0, irq_sync = 0;
13812
13813                 if (netif_running(dev)) {
13814                         tg3_phy_stop(tp);
13815                         tg3_netif_stop(tp);
13816                         irq_sync = 1;
13817                 }
13818
13819                 tg3_full_lock(tp, irq_sync);
13820                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13821                 err = tg3_nvram_lock(tp);
13822                 tg3_halt_cpu(tp, RX_CPU_BASE);
13823                 if (!tg3_flag(tp, 5705_PLUS))
13824                         tg3_halt_cpu(tp, TX_CPU_BASE);
13825                 if (!err)
13826                         tg3_nvram_unlock(tp);
13827
13828                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13829                         tg3_phy_reset(tp);
13830
13831                 if (tg3_test_registers(tp) != 0) {
13832                         etest->flags |= ETH_TEST_FL_FAILED;
13833                         data[TG3_REGISTER_TEST] = 1;
13834                 }
13835
13836                 if (tg3_test_memory(tp) != 0) {
13837                         etest->flags |= ETH_TEST_FL_FAILED;
13838                         data[TG3_MEMORY_TEST] = 1;
13839                 }
13840
13841                 if (doextlpbk)
13842                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13843
13844                 if (tg3_test_loopback(tp, data, doextlpbk))
13845                         etest->flags |= ETH_TEST_FL_FAILED;
13846
13847                 tg3_full_unlock(tp);
13848
13849                 if (tg3_test_interrupt(tp) != 0) {
13850                         etest->flags |= ETH_TEST_FL_FAILED;
13851                         data[TG3_INTERRUPT_TEST] = 1;
13852                 }
13853
13854                 tg3_full_lock(tp, 0);
13855
13856                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13857                 if (netif_running(dev)) {
13858                         tg3_flag_set(tp, INIT_COMPLETE);
13859                         err2 = tg3_restart_hw(tp, true);
13860                         if (!err2)
13861                                 tg3_netif_start(tp);
13862                 }
13863
13864                 tg3_full_unlock(tp);
13865
13866                 if (irq_sync && !err2)
13867                         tg3_phy_start(tp);
13868         }
13869         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13870                 tg3_power_down_prepare(tp);
13871
13872 }
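
/* The whole battery above is driven by `ethtool -t`, e.g.
 * `ethtool -t eth0 offline` (device name illustrative): the offline
 * flag gates the disruptive register/memory/loopback/interrupt tests,
 * and external_lb requests the external-loopback pass.
 */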
13873
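/* Backs the SIOCSHWTSTAMP ioctl: user space passes a struct
 * hwtstamp_config, e.g. { .tx_type = HWTSTAMP_TX_ON,
 * .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT } to timestamp all PTPv2
 * event packets, and the (possibly unchanged) config is copied back
 * on success.
 */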
13874 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13875 {
13876         struct tg3 *tp = netdev_priv(dev);
13877         struct hwtstamp_config stmpconf;
13878
13879         if (!tg3_flag(tp, PTP_CAPABLE))
13880                 return -EOPNOTSUPP;
13881
13882         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13883                 return -EFAULT;
13884
13885         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13886             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13887                 return -ERANGE;
13888
13889         switch (stmpconf.rx_filter) {
13890         case HWTSTAMP_FILTER_NONE:
13891                 tp->rxptpctl = 0;
13892                 break;
13893         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13894                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13895                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13896                 break;
13897         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13898                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13899                                TG3_RX_PTP_CTL_SYNC_EVNT;
13900                 break;
13901         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13902                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13903                                TG3_RX_PTP_CTL_DELAY_REQ;
13904                 break;
13905         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13906                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13907                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13908                 break;
13909         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13910                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13911                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13912                 break;
13913         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13914                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13915                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13916                 break;
13917         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13918                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13919                                TG3_RX_PTP_CTL_SYNC_EVNT;
13920                 break;
13921         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13922                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13923                                TG3_RX_PTP_CTL_SYNC_EVNT;
13924                 break;
13925         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13926                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13927                                TG3_RX_PTP_CTL_SYNC_EVNT;
13928                 break;
13929         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13930                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13931                                TG3_RX_PTP_CTL_DELAY_REQ;
13932                 break;
13933         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13934                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13935                                TG3_RX_PTP_CTL_DELAY_REQ;
13936                 break;
13937         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13938                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13939                                TG3_RX_PTP_CTL_DELAY_REQ;
13940                 break;
13941         default:
13942                 return -ERANGE;
13943         }
13944
13945         if (netif_running(dev) && tp->rxptpctl)
13946                 tw32(TG3_RX_PTP_CTL,
13947                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13948
13949         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13950                 tg3_flag_set(tp, TX_TSTAMP_EN);
13951         else
13952                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13953
13954         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13955                 -EFAULT : 0;
13956 }
13957
13958 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13959 {
13960         struct tg3 *tp = netdev_priv(dev);
13961         struct hwtstamp_config stmpconf;
13962
13963         if (!tg3_flag(tp, PTP_CAPABLE))
13964                 return -EOPNOTSUPP;
13965
13966         stmpconf.flags = 0;
13967         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13968                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13969
13970         switch (tp->rxptpctl) {
13971         case 0:
13972                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13973                 break;
13974         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13975                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13976                 break;
13977         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13978                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13979                 break;
13980         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13981                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13982                 break;
13983         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13984                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13985                 break;
13986         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13987                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13988                 break;
13989         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13990                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13991                 break;
13992         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13993                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13994                 break;
13995         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13996                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13997                 break;
13998         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13999                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
14000                 break;
14001         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14002                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
14003                 break;
14004         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14005                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
14006                 break;
14007         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14008                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
14009                 break;
14010         default:
14011                 WARN_ON_ONCE(1);
14012                 return -ERANGE;
14013         }
14014
14015         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
14016                 -EFAULT : 0;
14017 }
14018
14019 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
14020 {
14021         struct mii_ioctl_data *data = if_mii(ifr);
14022         struct tg3 *tp = netdev_priv(dev);
14023         int err;
14024
14025         if (tg3_flag(tp, USE_PHYLIB)) {
14026                 struct phy_device *phydev;
14027                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
14028                         return -EAGAIN;
14029                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
14030                 return phy_mii_ioctl(phydev, ifr, cmd);
14031         }
14032
14033         switch (cmd) {
14034         case SIOCGMIIPHY:
14035                 data->phy_id = tp->phy_addr;
14036
14037                 fallthrough;
14038         case SIOCGMIIREG: {
14039                 u32 mii_regval;
14040
14041                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14042                         break;                  /* We have no PHY */
14043
14044                 if (!netif_running(dev))
14045                         return -EAGAIN;
14046
14047                 spin_lock_bh(&tp->lock);
14048                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14049                                     data->reg_num & 0x1f, &mii_regval);
14050                 spin_unlock_bh(&tp->lock);
14051
14052                 data->val_out = mii_regval;
14053
14054                 return err;
14055         }
14056
14057         case SIOCSMIIREG:
14058                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14059                         break;                  /* We have no PHY */
14060
14061                 if (!netif_running(dev))
14062                         return -EAGAIN;
14063
14064                 spin_lock_bh(&tp->lock);
14065                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14066                                      data->reg_num & 0x1f, data->val_in);
14067                 spin_unlock_bh(&tp->lock);
14068
14069                 return err;
14070
14071         case SIOCSHWTSTAMP:
14072                 return tg3_hwtstamp_set(dev, ifr);
14073
14074         case SIOCGHWTSTAMP:
14075                 return tg3_hwtstamp_get(dev, ifr);
14076
14077         default:
14078                 /* do nothing */
14079                 break;
14080         }
14081         return -EOPNOTSUPP;
14082 }
14083
14084 static int tg3_get_coalesce(struct net_device *dev,
14085                             struct ethtool_coalesce *ec,
14086                             struct kernel_ethtool_coalesce *kernel_coal,
14087                             struct netlink_ext_ack *extack)
14088 {
14089         struct tg3 *tp = netdev_priv(dev);
14090
14091         memcpy(ec, &tp->coal, sizeof(*ec));
14092         return 0;
14093 }
14094
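/* Validate and apply interrupt coalescing parameters.  On 5705-and-newer
 * parts the *_irq and stats-block limits stay at their zero defaults, so
 * any nonzero request for those fields fails with -EINVAL there; the
 * rx/tx usec values must be nonzero on all parts.  Only the nine fields
 * copied below are honoured, and the hardware is only reprogrammed while
 * the interface is running.
 */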
14095 static int tg3_set_coalesce(struct net_device *dev,
14096                             struct ethtool_coalesce *ec,
14097                             struct kernel_ethtool_coalesce *kernel_coal,
14098                             struct netlink_ext_ack *extack)
14099 {
14100         struct tg3 *tp = netdev_priv(dev);
14101         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14102         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14103
14104         if (!tg3_flag(tp, 5705_PLUS)) {
14105                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14106                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14107                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14108                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14109         }
14110
14111         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14112             (!ec->rx_coalesce_usecs) ||
14113             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14114             (!ec->tx_coalesce_usecs) ||
14115             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14116             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14117             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14118             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14119             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14120             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14121             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14122             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14123                 return -EINVAL;
14124
14125         /* Only copy relevant parameters, ignore all others. */
14126         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14127         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14128         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14129         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14130         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14131         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14132         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14133         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14134         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14135
14136         if (netif_running(dev)) {
14137                 tg3_full_lock(tp, 0);
14138                 __tg3_set_coalesce(tp, &tp->coal);
14139                 tg3_full_unlock(tp);
14140         }
14141         return 0;
14142 }
14143
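/* ethtool set_eee handler.  The advertisement mask cannot be changed
 * directly and the Tx LPI timer is bounded by the CPMU link-idle field;
 * an accepted request is cached in tp->eee and, if the interface is up,
 * pushed to the hardware via tg3_setup_eee() followed by a PHY reset so
 * the new LPI settings take effect.
 */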
14144 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14145 {
14146         struct tg3 *tp = netdev_priv(dev);
14147
14148         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14149                 netdev_warn(tp->dev, "Board does not support EEE!\n");
14150                 return -EOPNOTSUPP;
14151         }
14152
14153         if (edata->advertised != tp->eee.advertised) {
14154                 netdev_warn(tp->dev,
14155                             "Direct manipulation of EEE advertisement is not supported\n");
14156                 return -EINVAL;
14157         }
14158
14159         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14160                 netdev_warn(tp->dev,
14161                             "Maximum supported Tx LPI timer is %#x\n",
14162                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14163                 return -EINVAL;
14164         }
14165
14166         tp->eee = *edata;
14167
14168         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14169         tg3_warn_mgmt_link_flap(tp);
14170
14171         if (netif_running(tp->dev)) {
14172                 tg3_full_lock(tp, 0);
14173                 tg3_setup_eee(tp);
14174                 tg3_phy_reset(tp);
14175                 tg3_full_unlock(tp);
14176         }
14177
14178         return 0;
14179 }
14180
14181 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14182 {
14183         struct tg3 *tp = netdev_priv(dev);
14184
14185         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14186                 netdev_warn(tp->dev,
14187                             "Board does not support EEE!\n");
14188                 return -EOPNOTSUPP;
14189         }
14190
14191         *edata = tp->eee;
14192         return 0;
14193 }
14194
14195 static const struct ethtool_ops tg3_ethtool_ops = {
14196         .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14197                                      ETHTOOL_COALESCE_MAX_FRAMES |
14198                                      ETHTOOL_COALESCE_USECS_IRQ |
14199                                      ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14200                                      ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14201         .get_drvinfo            = tg3_get_drvinfo,
14202         .get_regs_len           = tg3_get_regs_len,
14203         .get_regs               = tg3_get_regs,
14204         .get_wol                = tg3_get_wol,
14205         .set_wol                = tg3_set_wol,
14206         .get_msglevel           = tg3_get_msglevel,
14207         .set_msglevel           = tg3_set_msglevel,
14208         .nway_reset             = tg3_nway_reset,
14209         .get_link               = ethtool_op_get_link,
14210         .get_eeprom_len         = tg3_get_eeprom_len,
14211         .get_eeprom             = tg3_get_eeprom,
14212         .set_eeprom             = tg3_set_eeprom,
14213         .get_ringparam          = tg3_get_ringparam,
14214         .set_ringparam          = tg3_set_ringparam,
14215         .get_pauseparam         = tg3_get_pauseparam,
14216         .set_pauseparam         = tg3_set_pauseparam,
14217         .self_test              = tg3_self_test,
14218         .get_strings            = tg3_get_strings,
14219         .set_phys_id            = tg3_set_phys_id,
14220         .get_ethtool_stats      = tg3_get_ethtool_stats,
14221         .get_coalesce           = tg3_get_coalesce,
14222         .set_coalesce           = tg3_set_coalesce,
14223         .get_sset_count         = tg3_get_sset_count,
14224         .get_rxnfc              = tg3_get_rxnfc,
14225         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14226         .get_rxfh               = tg3_get_rxfh,
14227         .set_rxfh               = tg3_set_rxfh,
14228         .get_channels           = tg3_get_channels,
14229         .set_channels           = tg3_set_channels,
14230         .get_ts_info            = tg3_get_ts_info,
14231         .get_eee                = tg3_get_eee,
14232         .set_eee                = tg3_set_eee,
14233         .get_link_ksettings     = tg3_get_link_ksettings,
14234         .set_link_ksettings     = tg3_set_link_ksettings,
14235 };
14236
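/* ndo_get_stats64 handler.  After the chip has been shut down (or before
 * init completes) tp->hw_stats is unusable, so the snapshot saved in
 * net_stats_prev is reported instead of reading hardware counters.
 */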
14237 static void tg3_get_stats64(struct net_device *dev,
14238                             struct rtnl_link_stats64 *stats)
14239 {
14240         struct tg3 *tp = netdev_priv(dev);
14241
14242         spin_lock_bh(&tp->lock);
14243         if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14244                 *stats = tp->net_stats_prev;
14245                 spin_unlock_bh(&tp->lock);
14246                 return;
14247         }
14248
14249         tg3_get_nstats(tp, stats);
14250         spin_unlock_bh(&tp->lock);
14251 }
14252
14253 static void tg3_set_rx_mode(struct net_device *dev)
14254 {
14255         struct tg3 *tp = netdev_priv(dev);
14256
14257         if (!netif_running(dev))
14258                 return;
14259
14260         tg3_full_lock(tp, 0);
14261         __tg3_set_rx_mode(dev);
14262         tg3_full_unlock(tp);
14263 }
14264
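/* Apply an MTU change to the driver's software state.  MTUs above
 * ETH_DATA_LEN normally enable the jumbo RX ring; on 5780-class parts,
 * where jumbo frames and TSO are mutually exclusive, TSO_CAPABLE is
 * toggled instead and netdev_update_features() re-evaluates the feature
 * set.
 */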
14265 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14266                                int new_mtu)
14267 {
14268         dev->mtu = new_mtu;
14269
14270         if (new_mtu > ETH_DATA_LEN) {
14271                 if (tg3_flag(tp, 5780_CLASS)) {
14272                         netdev_update_features(dev);
14273                         tg3_flag_clear(tp, TSO_CAPABLE);
14274                 } else {
14275                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14276                 }
14277         } else {
14278                 if (tg3_flag(tp, 5780_CLASS)) {
14279                         tg3_flag_set(tp, TSO_CAPABLE);
14280                         netdev_update_features(dev);
14281                 }
14282                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14283         }
14284 }
14285
14286 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14287 {
14288         struct tg3 *tp = netdev_priv(dev);
14289         int err;
14290         bool reset_phy = false;
14291
14292         if (!netif_running(dev)) {
14293                 /* Just record the new MTU; it takes effect
14294                  * when the device is next brought up.
14295                  */
14296                 tg3_set_mtu(dev, tp, new_mtu);
14297                 return 0;
14298         }
14299
14300         tg3_phy_stop(tp);
14301
14302         tg3_netif_stop(tp);
14303
14304         tg3_set_mtu(dev, tp, new_mtu);
14305
14306         tg3_full_lock(tp, 1);
14307
14308         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14309
14310         /* Reset the PHY, otherwise the read DMA engine gets stuck in a
14311          * mode that breaks all requests up into 256-byte chunks.
14312          */
14313         if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14314             tg3_asic_rev(tp) == ASIC_REV_5717 ||
14315             tg3_asic_rev(tp) == ASIC_REV_5719 ||
14316             tg3_asic_rev(tp) == ASIC_REV_5720)
14317                 reset_phy = true;
14318
14319         err = tg3_restart_hw(tp, reset_phy);
14320
14321         if (!err)
14322                 tg3_netif_start(tp);
14323
14324         tg3_full_unlock(tp);
14325
14326         if (!err)
14327                 tg3_phy_start(tp);
14328
14329         return err;
14330 }
14331
14332 static const struct net_device_ops tg3_netdev_ops = {
14333         .ndo_open               = tg3_open,
14334         .ndo_stop               = tg3_close,
14335         .ndo_start_xmit         = tg3_start_xmit,
14336         .ndo_get_stats64        = tg3_get_stats64,
14337         .ndo_validate_addr      = eth_validate_addr,
14338         .ndo_set_rx_mode        = tg3_set_rx_mode,
14339         .ndo_set_mac_address    = tg3_set_mac_addr,
14340         .ndo_eth_ioctl          = tg3_ioctl,
14341         .ndo_tx_timeout         = tg3_tx_timeout,
14342         .ndo_change_mtu         = tg3_change_mtu,
14343         .ndo_fix_features       = tg3_fix_features,
14344         .ndo_set_features       = tg3_set_features,
14345 #ifdef CONFIG_NET_POLL_CONTROLLER
14346         .ndo_poll_controller    = tg3_poll_controller,
14347 #endif
14348 };
14349
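/* Probe the size of an EEPROM-style part by reading at addresses that
 * double from 0x10 upward: once the address wraps past the end of the
 * device, the read returns the magic signature stored at offset 0, and
 * the wrap point is the device size.  For example, on a 4KB part the
 * reads at 0x10..0x800 return ordinary data, the read at 0x1000 wraps
 * to offset 0 and hits the magic, so nvram_size becomes 0x1000.
 */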
14350 static void tg3_get_eeprom_size(struct tg3 *tp)
14351 {
14352         u32 cursize, val, magic;
14353
14354         tp->nvram_size = EEPROM_CHIP_SIZE;
14355
14356         if (tg3_nvram_read(tp, 0, &magic) != 0)
14357                 return;
14358
14359         if ((magic != TG3_EEPROM_MAGIC) &&
14360             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14361             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14362                 return;
14363
14364         /*
14365          * Size the chip by reading offsets at increasing powers of two.
14366          * When we encounter our validation signature, we know the addressing
14367          * has wrapped around, and thus have our chip size.
14368          */
14369         cursize = 0x10;
14370
14371         while (cursize < tp->nvram_size) {
14372                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14373                         return;
14374
14375                 if (val == magic)
14376                         break;
14377
14378                 cursize <<= 1;
14379         }
14380
14381         tp->nvram_size = cursize;
14382 }
14383
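/* Determine the NVRAM size.  Images carrying the standard magic store a
 * 16-bit size in KB at offset 0xf2; since tg3_nvram_read() swaps to CPU
 * order while the stored value is little-endian, swab16() is needed to
 * recover it (e.g. a stored value of 512 decodes to 512 * 1024 bytes).
 * Selfboot images are sized by tg3_get_eeprom_size() instead, and if no
 * size is recorded the default is 512KB.
 */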
14384 static void tg3_get_nvram_size(struct tg3 *tp)
14385 {
14386         u32 val;
14387
14388         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14389                 return;
14390
14391         /* Selfboot format */
14392         if (val != TG3_EEPROM_MAGIC) {
14393                 tg3_get_eeprom_size(tp);
14394                 return;
14395         }
14396
14397         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14398                 if (val != 0) {
14399                         /* This is confusing.  We want to operate on the
14400                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14401                          * call will read from NVRAM and byteswap the data
14402                          * according to the byteswapping settings for all
14403                          * other register accesses.  This ensures the data we
14404                          * want will always reside in the lower 16-bits.
14405                          * However, the data in NVRAM is in LE format, which
14406                          * means the data from the NVRAM read will always be
14407                          * opposite the endianness of the CPU.  The 16-bit
14408                          * byteswap then brings the data to CPU endianness.
14409                          */
14410                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14411                         return;
14412                 }
14413         }
14414         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14415 }
14416
14417 static void tg3_get_nvram_info(struct tg3 *tp)
14418 {
14419         u32 nvcfg1;
14420
14421         nvcfg1 = tr32(NVRAM_CFG1);
14422         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14423                 tg3_flag_set(tp, FLASH);
14424         } else {
14425                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14426                 tw32(NVRAM_CFG1, nvcfg1);
14427         }
14428
14429         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14430             tg3_flag(tp, 5780_CLASS)) {
14431                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14432                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14433                         tp->nvram_jedecnum = JEDEC_ATMEL;
14434                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14435                         tg3_flag_set(tp, NVRAM_BUFFERED);
14436                         break;
14437                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14438                         tp->nvram_jedecnum = JEDEC_ATMEL;
14439                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14440                         break;
14441                 case FLASH_VENDOR_ATMEL_EEPROM:
14442                         tp->nvram_jedecnum = JEDEC_ATMEL;
14443                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14444                         tg3_flag_set(tp, NVRAM_BUFFERED);
14445                         break;
14446                 case FLASH_VENDOR_ST:
14447                         tp->nvram_jedecnum = JEDEC_ST;
14448                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14449                         tg3_flag_set(tp, NVRAM_BUFFERED);
14450                         break;
14451                 case FLASH_VENDOR_SAIFUN:
14452                         tp->nvram_jedecnum = JEDEC_SAIFUN;
14453                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14454                         break;
14455                 case FLASH_VENDOR_SST_SMALL:
14456                 case FLASH_VENDOR_SST_LARGE:
14457                         tp->nvram_jedecnum = JEDEC_SST;
14458                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14459                         break;
14460                 }
14461         } else {
14462                 tp->nvram_jedecnum = JEDEC_ATMEL;
14463                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14464                 tg3_flag_set(tp, NVRAM_BUFFERED);
14465         }
14466 }
14467
14468 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14469 {
14470         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14471         case FLASH_5752PAGE_SIZE_256:
14472                 tp->nvram_pagesize = 256;
14473                 break;
14474         case FLASH_5752PAGE_SIZE_512:
14475                 tp->nvram_pagesize = 512;
14476                 break;
14477         case FLASH_5752PAGE_SIZE_1K:
14478                 tp->nvram_pagesize = 1024;
14479                 break;
14480         case FLASH_5752PAGE_SIZE_2K:
14481                 tp->nvram_pagesize = 2048;
14482                 break;
14483         case FLASH_5752PAGE_SIZE_4K:
14484                 tp->nvram_pagesize = 4096;
14485                 break;
14486         case FLASH_5752PAGE_SIZE_264:
14487                 tp->nvram_pagesize = 264;
14488                 break;
14489         case FLASH_5752PAGE_SIZE_528:
14490                 tp->nvram_pagesize = 528;
14491                 break;
14492         }
14493 }
14494
14495 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14496 {
14497         u32 nvcfg1;
14498
14499         nvcfg1 = tr32(NVRAM_CFG1);
14500
14501         /* NVRAM protection for TPM */
14502         if (nvcfg1 & (1 << 27))
14503                 tg3_flag_set(tp, PROTECTED_NVRAM);
14504
14505         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14506         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14507         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14508                 tp->nvram_jedecnum = JEDEC_ATMEL;
14509                 tg3_flag_set(tp, NVRAM_BUFFERED);
14510                 break;
14511         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14512                 tp->nvram_jedecnum = JEDEC_ATMEL;
14513                 tg3_flag_set(tp, NVRAM_BUFFERED);
14514                 tg3_flag_set(tp, FLASH);
14515                 break;
14516         case FLASH_5752VENDOR_ST_M45PE10:
14517         case FLASH_5752VENDOR_ST_M45PE20:
14518         case FLASH_5752VENDOR_ST_M45PE40:
14519                 tp->nvram_jedecnum = JEDEC_ST;
14520                 tg3_flag_set(tp, NVRAM_BUFFERED);
14521                 tg3_flag_set(tp, FLASH);
14522                 break;
14523         }
14524
14525         if (tg3_flag(tp, FLASH)) {
14526                 tg3_nvram_get_pagesize(tp, nvcfg1);
14527         } else {
14528                 /* For eeprom, set pagesize to maximum eeprom size */
14529                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14530
14531                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14532                 tw32(NVRAM_CFG1, nvcfg1);
14533         }
14534 }
14535
14536 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14537 {
14538         u32 nvcfg1, protect = 0;
14539
14540         nvcfg1 = tr32(NVRAM_CFG1);
14541
14542         /* NVRAM protection for TPM */
14543         if (nvcfg1 & (1 << 27)) {
14544                 tg3_flag_set(tp, PROTECTED_NVRAM);
14545                 protect = 1;
14546         }
14547
14548         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14549         switch (nvcfg1) {
14550         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14551         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14552         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14553         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14554                 tp->nvram_jedecnum = JEDEC_ATMEL;
14555                 tg3_flag_set(tp, NVRAM_BUFFERED);
14556                 tg3_flag_set(tp, FLASH);
14557                 tp->nvram_pagesize = 264;
14558                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14559                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14560                         tp->nvram_size = (protect ? 0x3e200 :
14561                                           TG3_NVRAM_SIZE_512KB);
14562                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14563                         tp->nvram_size = (protect ? 0x1f200 :
14564                                           TG3_NVRAM_SIZE_256KB);
14565                 else
14566                         tp->nvram_size = (protect ? 0x1f200 :
14567                                           TG3_NVRAM_SIZE_128KB);
14568                 break;
14569         case FLASH_5752VENDOR_ST_M45PE10:
14570         case FLASH_5752VENDOR_ST_M45PE20:
14571         case FLASH_5752VENDOR_ST_M45PE40:
14572                 tp->nvram_jedecnum = JEDEC_ST;
14573                 tg3_flag_set(tp, NVRAM_BUFFERED);
14574                 tg3_flag_set(tp, FLASH);
14575                 tp->nvram_pagesize = 256;
14576                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14577                         tp->nvram_size = (protect ?
14578                                           TG3_NVRAM_SIZE_64KB :
14579                                           TG3_NVRAM_SIZE_128KB);
14580                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14581                         tp->nvram_size = (protect ?
14582                                           TG3_NVRAM_SIZE_64KB :
14583                                           TG3_NVRAM_SIZE_256KB);
14584                 else
14585                         tp->nvram_size = (protect ?
14586                                           TG3_NVRAM_SIZE_128KB :
14587                                           TG3_NVRAM_SIZE_512KB);
14588                 break;
14589         }
14590 }
14591
14592 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14593 {
14594         u32 nvcfg1;
14595
14596         nvcfg1 = tr32(NVRAM_CFG1);
14597
14598         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14599         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14600         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14601         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14602         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14603                 tp->nvram_jedecnum = JEDEC_ATMEL;
14604                 tg3_flag_set(tp, NVRAM_BUFFERED);
14605                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14606
14607                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14608                 tw32(NVRAM_CFG1, nvcfg1);
14609                 break;
14610         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14611         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14612         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14613         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14614                 tp->nvram_jedecnum = JEDEC_ATMEL;
14615                 tg3_flag_set(tp, NVRAM_BUFFERED);
14616                 tg3_flag_set(tp, FLASH);
14617                 tp->nvram_pagesize = 264;
14618                 break;
14619         case FLASH_5752VENDOR_ST_M45PE10:
14620         case FLASH_5752VENDOR_ST_M45PE20:
14621         case FLASH_5752VENDOR_ST_M45PE40:
14622                 tp->nvram_jedecnum = JEDEC_ST;
14623                 tg3_flag_set(tp, NVRAM_BUFFERED);
14624                 tg3_flag_set(tp, FLASH);
14625                 tp->nvram_pagesize = 256;
14626                 break;
14627         }
14628 }
14629
14630 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14631 {
14632         u32 nvcfg1, protect = 0;
14633
14634         nvcfg1 = tr32(NVRAM_CFG1);
14635
14636         /* NVRAM protection for TPM */
14637         if (nvcfg1 & (1 << 27)) {
14638                 tg3_flag_set(tp, PROTECTED_NVRAM);
14639                 protect = 1;
14640         }
14641
14642         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14643         switch (nvcfg1) {
14644         case FLASH_5761VENDOR_ATMEL_ADB021D:
14645         case FLASH_5761VENDOR_ATMEL_ADB041D:
14646         case FLASH_5761VENDOR_ATMEL_ADB081D:
14647         case FLASH_5761VENDOR_ATMEL_ADB161D:
14648         case FLASH_5761VENDOR_ATMEL_MDB021D:
14649         case FLASH_5761VENDOR_ATMEL_MDB041D:
14650         case FLASH_5761VENDOR_ATMEL_MDB081D:
14651         case FLASH_5761VENDOR_ATMEL_MDB161D:
14652                 tp->nvram_jedecnum = JEDEC_ATMEL;
14653                 tg3_flag_set(tp, NVRAM_BUFFERED);
14654                 tg3_flag_set(tp, FLASH);
14655                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14656                 tp->nvram_pagesize = 256;
14657                 break;
14658         case FLASH_5761VENDOR_ST_A_M45PE20:
14659         case FLASH_5761VENDOR_ST_A_M45PE40:
14660         case FLASH_5761VENDOR_ST_A_M45PE80:
14661         case FLASH_5761VENDOR_ST_A_M45PE16:
14662         case FLASH_5761VENDOR_ST_M_M45PE20:
14663         case FLASH_5761VENDOR_ST_M_M45PE40:
14664         case FLASH_5761VENDOR_ST_M_M45PE80:
14665         case FLASH_5761VENDOR_ST_M_M45PE16:
14666                 tp->nvram_jedecnum = JEDEC_ST;
14667                 tg3_flag_set(tp, NVRAM_BUFFERED);
14668                 tg3_flag_set(tp, FLASH);
14669                 tp->nvram_pagesize = 256;
14670                 break;
14671         }
14672
14673         if (protect) {
14674                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14675         } else {
14676                 switch (nvcfg1) {
14677                 case FLASH_5761VENDOR_ATMEL_ADB161D:
14678                 case FLASH_5761VENDOR_ATMEL_MDB161D:
14679                 case FLASH_5761VENDOR_ST_A_M45PE16:
14680                 case FLASH_5761VENDOR_ST_M_M45PE16:
14681                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14682                         break;
14683                 case FLASH_5761VENDOR_ATMEL_ADB081D:
14684                 case FLASH_5761VENDOR_ATMEL_MDB081D:
14685                 case FLASH_5761VENDOR_ST_A_M45PE80:
14686                 case FLASH_5761VENDOR_ST_M_M45PE80:
14687                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14688                         break;
14689                 case FLASH_5761VENDOR_ATMEL_ADB041D:
14690                 case FLASH_5761VENDOR_ATMEL_MDB041D:
14691                 case FLASH_5761VENDOR_ST_A_M45PE40:
14692                 case FLASH_5761VENDOR_ST_M_M45PE40:
14693                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14694                         break;
14695                 case FLASH_5761VENDOR_ATMEL_ADB021D:
14696                 case FLASH_5761VENDOR_ATMEL_MDB021D:
14697                 case FLASH_5761VENDOR_ST_A_M45PE20:
14698                 case FLASH_5761VENDOR_ST_M_M45PE20:
14699                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14700                         break;
14701                 }
14702         }
14703 }
14704
14705 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14706 {
14707         tp->nvram_jedecnum = JEDEC_ATMEL;
14708         tg3_flag_set(tp, NVRAM_BUFFERED);
14709         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14710 }
14711
14712 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14713 {
14714         u32 nvcfg1;
14715
14716         nvcfg1 = tr32(NVRAM_CFG1);
14717
14718         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14719         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14720         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14721                 tp->nvram_jedecnum = JEDEC_ATMEL;
14722                 tg3_flag_set(tp, NVRAM_BUFFERED);
14723                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14724
14725                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14726                 tw32(NVRAM_CFG1, nvcfg1);
14727                 return;
14728         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14729         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14730         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14731         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14732         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14733         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14734         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14735                 tp->nvram_jedecnum = JEDEC_ATMEL;
14736                 tg3_flag_set(tp, NVRAM_BUFFERED);
14737                 tg3_flag_set(tp, FLASH);
14738
14739                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14740                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14741                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14742                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14743                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14744                         break;
14745                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14746                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14747                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14748                         break;
14749                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14750                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14751                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14752                         break;
14753                 }
14754                 break;
14755         case FLASH_5752VENDOR_ST_M45PE10:
14756         case FLASH_5752VENDOR_ST_M45PE20:
14757         case FLASH_5752VENDOR_ST_M45PE40:
14758                 tp->nvram_jedecnum = JEDEC_ST;
14759                 tg3_flag_set(tp, NVRAM_BUFFERED);
14760                 tg3_flag_set(tp, FLASH);
14761
14762                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14763                 case FLASH_5752VENDOR_ST_M45PE10:
14764                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14765                         break;
14766                 case FLASH_5752VENDOR_ST_M45PE20:
14767                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14768                         break;
14769                 case FLASH_5752VENDOR_ST_M45PE40:
14770                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14771                         break;
14772                 }
14773                 break;
14774         default:
14775                 tg3_flag_set(tp, NO_NVRAM);
14776                 return;
14777         }
14778
14779         tg3_nvram_get_pagesize(tp, nvcfg1);
14780         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14781                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14782 }
14783
14785 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14786 {
14787         u32 nvcfg1;
14788
14789         nvcfg1 = tr32(NVRAM_CFG1);
14790
14791         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14792         case FLASH_5717VENDOR_ATMEL_EEPROM:
14793         case FLASH_5717VENDOR_MICRO_EEPROM:
14794                 tp->nvram_jedecnum = JEDEC_ATMEL;
14795                 tg3_flag_set(tp, NVRAM_BUFFERED);
14796                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14797
14798                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14799                 tw32(NVRAM_CFG1, nvcfg1);
14800                 return;
14801         case FLASH_5717VENDOR_ATMEL_MDB011D:
14802         case FLASH_5717VENDOR_ATMEL_ADB011B:
14803         case FLASH_5717VENDOR_ATMEL_ADB011D:
14804         case FLASH_5717VENDOR_ATMEL_MDB021D:
14805         case FLASH_5717VENDOR_ATMEL_ADB021B:
14806         case FLASH_5717VENDOR_ATMEL_ADB021D:
14807         case FLASH_5717VENDOR_ATMEL_45USPT:
14808                 tp->nvram_jedecnum = JEDEC_ATMEL;
14809                 tg3_flag_set(tp, NVRAM_BUFFERED);
14810                 tg3_flag_set(tp, FLASH);
14811
14812                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14813                 case FLASH_5717VENDOR_ATMEL_MDB021D:
14814                         /* Detect size with tg3_get_nvram_size() */
14815                         break;
14816                 case FLASH_5717VENDOR_ATMEL_ADB021B:
14817                 case FLASH_5717VENDOR_ATMEL_ADB021D:
14818                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14819                         break;
14820                 default:
14821                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14822                         break;
14823                 }
14824                 break;
14825         case FLASH_5717VENDOR_ST_M_M25PE10:
14826         case FLASH_5717VENDOR_ST_A_M25PE10:
14827         case FLASH_5717VENDOR_ST_M_M45PE10:
14828         case FLASH_5717VENDOR_ST_A_M45PE10:
14829         case FLASH_5717VENDOR_ST_M_M25PE20:
14830         case FLASH_5717VENDOR_ST_A_M25PE20:
14831         case FLASH_5717VENDOR_ST_M_M45PE20:
14832         case FLASH_5717VENDOR_ST_A_M45PE20:
14833         case FLASH_5717VENDOR_ST_25USPT:
14834         case FLASH_5717VENDOR_ST_45USPT:
14835                 tp->nvram_jedecnum = JEDEC_ST;
14836                 tg3_flag_set(tp, NVRAM_BUFFERED);
14837                 tg3_flag_set(tp, FLASH);
14838
14839                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14840                 case FLASH_5717VENDOR_ST_M_M25PE20:
14841                 case FLASH_5717VENDOR_ST_M_M45PE20:
14842                         /* Detect size with tg3_get_nvram_size() */
14843                         break;
14844                 case FLASH_5717VENDOR_ST_A_M25PE20:
14845                 case FLASH_5717VENDOR_ST_A_M45PE20:
14846                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14847                         break;
14848                 default:
14849                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14850                         break;
14851                 }
14852                 break;
14853         default:
14854                 tg3_flag_set(tp, NO_NVRAM);
14855                 return;
14856         }
14857
14858         tg3_nvram_get_pagesize(tp, nvcfg1);
14859         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14860                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14861 }
14862
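/* Decode the NVRAM pin straps for the 5720 and 5762.  On 5762 parts
 * with a Macronix MX25L flash the size comes from the autosense status
 * register: the device-ID field selects a power of two that is then
 * scaled up to bytes.  Assuming AUTOSENSE_SIZE_IN_MB is log2 of one
 * megabyte (20), a device ID of 1 decodes to (1 << 1) << 20 = 2MB.
 * The remaining 5762 straps are remapped onto their 5720 equivalents
 * and fall through to the common decode below.
 */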
14863 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14864 {
14865         u32 nvcfg1, nvmpinstrp, nv_status;
14866
14867         nvcfg1 = tr32(NVRAM_CFG1);
14868         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14869
14870         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14871                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14872                         tg3_flag_set(tp, NO_NVRAM);
14873                         return;
14874                 }
14875
14876                 switch (nvmpinstrp) {
14877                 case FLASH_5762_MX25L_100:
14878                 case FLASH_5762_MX25L_200:
14879                 case FLASH_5762_MX25L_400:
14880                 case FLASH_5762_MX25L_800:
14881                 case FLASH_5762_MX25L_160_320:
14882                         tp->nvram_pagesize = 4096;
14883                         tp->nvram_jedecnum = JEDEC_MACRONIX;
14884                         tg3_flag_set(tp, NVRAM_BUFFERED);
14885                         tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14886                         tg3_flag_set(tp, FLASH);
14887                         nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14888                         tp->nvram_size =
14889                                 (1 << (nv_status >> AUTOSENSE_DEVID &
14890                                                 AUTOSENSE_DEVID_MASK)
14891                                         << AUTOSENSE_SIZE_IN_MB);
14892                         return;
14893
14894                 case FLASH_5762_EEPROM_HD:
14895                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14896                         break;
14897                 case FLASH_5762_EEPROM_LD:
14898                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14899                         break;
14900                 case FLASH_5720VENDOR_M_ST_M45PE20:
14901                         /* This pinstrap supports multiple sizes, so force it
14902                          * to read the actual size from location 0xf0.
14903                          */
14904                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14905                         break;
14906                 }
14907         }
14908
14909         switch (nvmpinstrp) {
14910         case FLASH_5720_EEPROM_HD:
14911         case FLASH_5720_EEPROM_LD:
14912                 tp->nvram_jedecnum = JEDEC_ATMEL;
14913                 tg3_flag_set(tp, NVRAM_BUFFERED);
14914
14915                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14916                 tw32(NVRAM_CFG1, nvcfg1);
14917                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14918                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14919                 else
14920                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14921                 return;
14922         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14923         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14924         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14925         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14926         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14927         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14928         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14929         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14930         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14931         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14932         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14933         case FLASH_5720VENDOR_ATMEL_45USPT:
14934                 tp->nvram_jedecnum = JEDEC_ATMEL;
14935                 tg3_flag_set(tp, NVRAM_BUFFERED);
14936                 tg3_flag_set(tp, FLASH);
14937
14938                 switch (nvmpinstrp) {
14939                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14940                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14941                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14942                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14943                         break;
14944                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14945                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14946                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14947                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14948                         break;
14949                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14950                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14951                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14952                         break;
14953                 default:
14954                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14955                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14956                         break;
14957                 }
14958                 break;
14959         case FLASH_5720VENDOR_M_ST_M25PE10:
14960         case FLASH_5720VENDOR_M_ST_M45PE10:
14961         case FLASH_5720VENDOR_A_ST_M25PE10:
14962         case FLASH_5720VENDOR_A_ST_M45PE10:
14963         case FLASH_5720VENDOR_M_ST_M25PE20:
14964         case FLASH_5720VENDOR_M_ST_M45PE20:
14965         case FLASH_5720VENDOR_A_ST_M25PE20:
14966         case FLASH_5720VENDOR_A_ST_M45PE20:
14967         case FLASH_5720VENDOR_M_ST_M25PE40:
14968         case FLASH_5720VENDOR_M_ST_M45PE40:
14969         case FLASH_5720VENDOR_A_ST_M25PE40:
14970         case FLASH_5720VENDOR_A_ST_M45PE40:
14971         case FLASH_5720VENDOR_M_ST_M25PE80:
14972         case FLASH_5720VENDOR_M_ST_M45PE80:
14973         case FLASH_5720VENDOR_A_ST_M25PE80:
14974         case FLASH_5720VENDOR_A_ST_M45PE80:
14975         case FLASH_5720VENDOR_ST_25USPT:
14976         case FLASH_5720VENDOR_ST_45USPT:
14977                 tp->nvram_jedecnum = JEDEC_ST;
14978                 tg3_flag_set(tp, NVRAM_BUFFERED);
14979                 tg3_flag_set(tp, FLASH);
14980
14981                 switch (nvmpinstrp) {
14982                 case FLASH_5720VENDOR_M_ST_M25PE20:
14983                 case FLASH_5720VENDOR_M_ST_M45PE20:
14984                 case FLASH_5720VENDOR_A_ST_M25PE20:
14985                 case FLASH_5720VENDOR_A_ST_M45PE20:
14986                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14987                         break;
14988                 case FLASH_5720VENDOR_M_ST_M25PE40:
14989                 case FLASH_5720VENDOR_M_ST_M45PE40:
14990                 case FLASH_5720VENDOR_A_ST_M25PE40:
14991                 case FLASH_5720VENDOR_A_ST_M45PE40:
14992                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14993                         break;
14994                 case FLASH_5720VENDOR_M_ST_M25PE80:
14995                 case FLASH_5720VENDOR_M_ST_M45PE80:
14996                 case FLASH_5720VENDOR_A_ST_M25PE80:
14997                 case FLASH_5720VENDOR_A_ST_M45PE80:
14998                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14999                         break;
15000                 default:
15001                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15002                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
15003                         break;
15004                 }
15005                 break;
15006         default:
15007                 tg3_flag_set(tp, NO_NVRAM);
15008                 return;
15009         }
15010
15011         tg3_nvram_get_pagesize(tp, nvcfg1);
15012         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
15013                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
15014
15015         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
15016                 u32 val;
15017
15018                 if (tg3_nvram_read(tp, 0, &val))
15019                         return;
15020
15021                 if (val != TG3_EEPROM_MAGIC &&
15022                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
15023                         tg3_flag_set(tp, NO_NVRAM);
15024         }
15025 }
15026
15027 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
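/* The init sequence resets the EEPROM access FSM, enables auto-SEEPROM
 * mode, then takes the NVRAM lock and dispatches to the per-ASIC strap
 * decoder to fill in nvram_jedecnum, nvram_pagesize and nvram_size.
 * SSB GigE cores have no NVRAM or EEPROM at all and are simply flagged
 * NO_NVRAM; 5700/5701 fall back to plain EEPROM sizing.
 */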
15028 static void tg3_nvram_init(struct tg3 *tp)
15029 {
15030         if (tg3_flag(tp, IS_SSB_CORE)) {
15031                 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
15032                 tg3_flag_clear(tp, NVRAM);
15033                 tg3_flag_clear(tp, NVRAM_BUFFERED);
15034                 tg3_flag_set(tp, NO_NVRAM);
15035                 return;
15036         }
15037
15038         tw32_f(GRC_EEPROM_ADDR,
15039              (EEPROM_ADDR_FSM_RESET |
15040               (EEPROM_DEFAULT_CLOCK_PERIOD <<
15041                EEPROM_ADDR_CLKPERD_SHIFT)));
15042
15043         msleep(1);
15044
15045         /* Enable serial EEPROM (seeprom) accesses. */
15046         tw32_f(GRC_LOCAL_CTRL,
15047              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15048         udelay(100);
15049
15050         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15051             tg3_asic_rev(tp) != ASIC_REV_5701) {
15052                 tg3_flag_set(tp, NVRAM);
15053
15054                 if (tg3_nvram_lock(tp)) {
15055                         netdev_warn(tp->dev,
15056                                     "Cannot get nvram lock, %s failed\n",
15057                                     __func__);
15058                         return;
15059                 }
15060                 tg3_enable_nvram_access(tp);
15061
15062                 tp->nvram_size = 0;
15063
15064                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15065                         tg3_get_5752_nvram_info(tp);
15066                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15067                         tg3_get_5755_nvram_info(tp);
15068                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15069                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
15070                          tg3_asic_rev(tp) == ASIC_REV_5785)
15071                         tg3_get_5787_nvram_info(tp);
15072                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15073                         tg3_get_5761_nvram_info(tp);
15074                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15075                         tg3_get_5906_nvram_info(tp);
15076                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15077                          tg3_flag(tp, 57765_CLASS))
15078                         tg3_get_57780_nvram_info(tp);
15079                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15080                          tg3_asic_rev(tp) == ASIC_REV_5719)
15081                         tg3_get_5717_nvram_info(tp);
15082                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15083                          tg3_asic_rev(tp) == ASIC_REV_5762)
15084                         tg3_get_5720_nvram_info(tp);
15085                 else
15086                         tg3_get_nvram_info(tp);
15087
15088                 if (tp->nvram_size == 0)
15089                         tg3_get_nvram_size(tp);
15090
15091                 tg3_disable_nvram_access(tp);
15092                 tg3_nvram_unlock(tp);
15093
15094         } else {
15095                 tg3_flag_clear(tp, NVRAM);
15096                 tg3_flag_clear(tp, NVRAM_BUFFERED);
15097
15098                 tg3_get_eeprom_size(tp);
15099         }
15100 }
15101
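/* Some boards do not report a usable PHY ID through the normal probe
 * path, so the table below maps PCI subsystem vendor/device IDs to the
 * PHY actually fitted; entries with a phy_id of 0 are typically
 * fiber/SerDes boards with no copper PHY.
 */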
15102 struct subsys_tbl_ent {
15103         u16 subsys_vendor, subsys_devid;
15104         u32 phy_id;
15105 };
15106
15107 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15108         /* Broadcom boards. */
15109         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15110           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15111         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15112           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15113         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15114           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15115         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15116           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15117         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15118           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15119         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15120           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15121         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15122           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15123         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15124           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15125         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15126           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15127         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15128           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15129         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15130           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15131
15132         /* 3com boards. */
15133         { TG3PCI_SUBVENDOR_ID_3COM,
15134           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15135         { TG3PCI_SUBVENDOR_ID_3COM,
15136           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15137         { TG3PCI_SUBVENDOR_ID_3COM,
15138           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15139         { TG3PCI_SUBVENDOR_ID_3COM,
15140           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15141         { TG3PCI_SUBVENDOR_ID_3COM,
15142           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15143
15144         /* DELL boards. */
15145         { TG3PCI_SUBVENDOR_ID_DELL,
15146           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15147         { TG3PCI_SUBVENDOR_ID_DELL,
15148           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15149         { TG3PCI_SUBVENDOR_ID_DELL,
15150           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15151         { TG3PCI_SUBVENDOR_ID_DELL,
15152           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15153
15154         /* Compaq boards. */
15155         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15156           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15157         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15158           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15159         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15160           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15161         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15162           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15163         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15164           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15165
15166         /* IBM boards. */
15167         { TG3PCI_SUBVENDOR_ID_IBM,
15168           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15169 };
15170
15171 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15172 {
15173         int i;
15174
15175         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15176                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15177                      tp->pdev->subsystem_vendor) &&
15178                     (subsys_id_to_phy_id[i].subsys_devid ==
15179                      tp->pdev->subsystem_device))
15180                         return &subsys_id_to_phy_id[i];
15181         }
15182         return NULL;
15183 }
15184
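/* Pull the hardware configuration out of the NIC SRAM shadow that the
 * bootcode populates: PHY ID, LED mode, WOL/ASF/APE enables and various
 * PHY quirks.  The PHY ID is reassembled from two SRAM words roughly as
 *
 *	eeprom_phy_id  = (id1 >> 16) << 10;	upper ID bits
 *	eeprom_phy_id |= (id2 & 0xfc00) << 16;	OUI portion
 *	eeprom_phy_id |= (id2 & 0x03ff);	model/revision
 *
 * (the field naming is a best guess; the bit packing matches the code
 * below).  On 5906 the equivalent data lives in VCPU_CFGSHDW instead.
 */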
15185 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15186 {
15187         u32 val;
15188
15189         tp->phy_id = TG3_PHY_ID_INVALID;
15190         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15191
15192         /* Assume an onboard device and WOL capable by default.  */
15193         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15194         tg3_flag_set(tp, WOL_CAP);
15195
15196         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15197                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15198                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15199                         tg3_flag_set(tp, IS_NIC);
15200                 }
15201                 val = tr32(VCPU_CFGSHDW);
15202                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15203                         tg3_flag_set(tp, ASPM_WORKAROUND);
15204                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15205                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15206                         tg3_flag_set(tp, WOL_ENABLE);
15207                         device_set_wakeup_enable(&tp->pdev->dev, true);
15208                 }
15209                 goto done;
15210         }
15211
15212         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15213         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15214                 u32 nic_cfg, led_cfg;
15215                 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15216                 u32 nic_phy_id, ver, eeprom_phy_id;
15217                 int eeprom_phy_serdes = 0;
15218
15219                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15220                 tp->nic_sram_data_cfg = nic_cfg;
15221
15222                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15223                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15224                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15225                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
15226                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
15227                     (ver > 0) && (ver < 0x100))
15228                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15229
15230                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15231                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15232
15233                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15234                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
15235                     tg3_asic_rev(tp) == ASIC_REV_5720)
15236                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15237
15238                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15239                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15240                         eeprom_phy_serdes = 1;
15241
15242                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15243                 if (nic_phy_id != 0) {
15244                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15245                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15246
15247                         eeprom_phy_id  = (id1 >> 16) << 10;
15248                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
15249                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15250                 } else
15251                         eeprom_phy_id = 0;
15252
15253                 tp->phy_id = eeprom_phy_id;
15254                 if (eeprom_phy_serdes) {
15255                         if (!tg3_flag(tp, 5705_PLUS))
15256                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15257                         else
15258                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15259                 }
15260
15261                 if (tg3_flag(tp, 5750_PLUS))
15262                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15263                                     SHASTA_EXT_LED_MODE_MASK);
15264                 else
15265                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15266
15267                 switch (led_cfg) {
15268                 default:
15269                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15270                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15271                         break;
15272
15273                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15274                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15275                         break;
15276
15277                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15278                         tp->led_ctrl = LED_CTRL_MODE_MAC;
15279
15280                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15281                          * read on some older 5700/5701 bootcode.
15282                          */
15283                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15284                             tg3_asic_rev(tp) == ASIC_REV_5701)
15285                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15286
15287                         break;
15288
15289                 case SHASTA_EXT_LED_SHARED:
15290                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
15291                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15292                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15293                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15294                                                  LED_CTRL_MODE_PHY_2);
15295
15296                         if (tg3_flag(tp, 5717_PLUS) ||
15297                             tg3_asic_rev(tp) == ASIC_REV_5762)
15298                                 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15299                                                 LED_CTRL_BLINK_RATE_MASK;
15300
15301                         break;
15302
15303                 case SHASTA_EXT_LED_MAC:
15304                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15305                         break;
15306
15307                 case SHASTA_EXT_LED_COMBO:
15308                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
15309                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15310                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15311                                                  LED_CTRL_MODE_PHY_2);
15312                         break;
15313
15314                 }
15315
15316                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15317                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
15318                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15319                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15320
15321                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15322                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15323
15324                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15325                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15326                         if ((tp->pdev->subsystem_vendor ==
15327                              PCI_VENDOR_ID_ARIMA) &&
15328                             (tp->pdev->subsystem_device == 0x205a ||
15329                              tp->pdev->subsystem_device == 0x2063))
15330                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15331                 } else {
15332                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15333                         tg3_flag_set(tp, IS_NIC);
15334                 }
15335
15336                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15337                         tg3_flag_set(tp, ENABLE_ASF);
15338                         if (tg3_flag(tp, 5750_PLUS))
15339                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15340                 }
15341
15342                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15343                     tg3_flag(tp, 5750_PLUS))
15344                         tg3_flag_set(tp, ENABLE_APE);
15345
15346                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15347                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15348                         tg3_flag_clear(tp, WOL_CAP);
15349
15350                 if (tg3_flag(tp, WOL_CAP) &&
15351                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15352                         tg3_flag_set(tp, WOL_ENABLE);
15353                         device_set_wakeup_enable(&tp->pdev->dev, true);
15354                 }
15355
15356                 if (cfg2 & (1 << 17))
15357                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15358
15359                 /* Serdes signal pre-emphasis in register 0x590 is set
15360                  * by the bootcode if bit 18 is set. */
15361                 if (cfg2 & (1 << 18))
15362                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15363
15364                 if ((tg3_flag(tp, 57765_PLUS) ||
15365                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15366                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15367                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15368                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15369
15370                 if (tg3_flag(tp, PCI_EXPRESS)) {
15371                         u32 cfg3;
15372
15373                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15374                         if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15375                             !tg3_flag(tp, 57765_PLUS) &&
15376                             (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15377                                 tg3_flag_set(tp, ASPM_WORKAROUND);
15378                         if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15379                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15380                         if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15381                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15382                 }
15383
15384                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15385                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15386                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15387                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15388                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15389                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15390
15391                 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15392                         tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15393         }
15394 done:
15395         if (tg3_flag(tp, WOL_CAP))
15396                 device_set_wakeup_enable(&tp->pdev->dev,
15397                                          tg3_flag(tp, WOL_ENABLE));
15398         else
15399                 device_set_wakeup_capable(&tp->pdev->dev, false);
15400 }
15401
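/* Read one 32-bit word from the chip's OTP region via the APE
 * interface.  The NVRAM lock is held across the access, and the read
 * command is polled for completion for up to ~1 ms (100 x 10 us).
 */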
15402 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15403 {
15404         int i, err;
15405         u32 val2, off = offset * 8;
15406
15407         err = tg3_nvram_lock(tp);
15408         if (err)
15409                 return err;
15410
15411         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15412         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15413                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15414         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15415         udelay(10);
15416
15417         for (i = 0; i < 100; i++) {
15418                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15419                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15420                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15421                         break;
15422                 }
15423                 udelay(10);
15424         }
15425
15426         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15427
15428         tg3_nvram_unlock(tp);
15429         if (val2 & APE_OTP_STATUS_CMD_DONE)
15430                 return 0;
15431
15432         return -EBUSY;
15433 }
15434
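/* Kick off an OTP controller command and busy-wait for the DONE
 * status bit.  Returns 0 on success or -EBUSY if the command does
 * not complete within about 1 ms.
 */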
15435 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15436 {
15437         int i;
15438         u32 val;
15439
15440         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15441         tw32(OTP_CTRL, cmd);
15442
15443         /* Wait for up to 1 ms for command to execute. */
15444         for (i = 0; i < 100; i++) {
15445                 val = tr32(OTP_STATUS);
15446                 if (val & OTP_STATUS_CMD_DONE)
15447                         break;
15448                 udelay(10);
15449         }
15450
15451         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15452 }
15453
15454 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15455  * configuration is a 32-bit value that straddles the alignment boundary.
15456  * We do two 32-bit reads and then shift and merge the results.
15457  */
15458 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15459 {
15460         u32 bhalf_otp, thalf_otp;
15461
15462         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15463
15464         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15465                 return 0;
15466
15467         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15468
15469         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15470                 return 0;
15471
15472         thalf_otp = tr32(OTP_READ_DATA);
15473
15474         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15475
15476         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15477                 return 0;
15478
15479         bhalf_otp = tr32(OTP_READ_DATA);
15480
15481         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15482 }
15483
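/* Build the default advertised link-mode mask: autoneg plus every
 * speed/duplex the PHY can do, honoring the 10/100-only and
 * no-1G-half-duplex restrictions and the copper (TP) vs. serdes
 * (fibre) port type.
 */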
15484 static void tg3_phy_init_link_config(struct tg3 *tp)
15485 {
15486         u32 adv = ADVERTISED_Autoneg;
15487
15488         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15489                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15490                         adv |= ADVERTISED_1000baseT_Half;
15491                 adv |= ADVERTISED_1000baseT_Full;
15492         }
15493
15494         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15495                 adv |= ADVERTISED_100baseT_Half |
15496                        ADVERTISED_100baseT_Full |
15497                        ADVERTISED_10baseT_Half |
15498                        ADVERTISED_10baseT_Full |
15499                        ADVERTISED_TP;
15500         else
15501                 adv |= ADVERTISED_FIBRE;
15502
15503         tp->link_config.advertising = adv;
15504         tp->link_config.speed = SPEED_UNKNOWN;
15505         tp->link_config.duplex = DUPLEX_UNKNOWN;
15506         tp->link_config.autoneg = AUTONEG_ENABLE;
15507         tp->link_config.active_speed = SPEED_UNKNOWN;
15508         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15509
15510         tp->old_link = -1;
15511 }
15512
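/* Identify the PHY and set up the initial link configuration.  The
 * hardware PHY ID registers are read directly unless ASF/APE firmware
 * owns the PHY; if no sane ID is found there, fall back to the value
 * cached from the EEPROM or to the hard-coded subsystem table.
 */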
15513 static int tg3_phy_probe(struct tg3 *tp)
15514 {
15515         u32 hw_phy_id_1, hw_phy_id_2;
15516         u32 hw_phy_id, hw_phy_id_masked;
15517         int err;
15518
15519         /* flow control autonegotiation is default behavior */
15520         tg3_flag_set(tp, PAUSE_AUTONEG);
15521         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15522
15523         if (tg3_flag(tp, ENABLE_APE)) {
15524                 switch (tp->pci_fn) {
15525                 case 0:
15526                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15527                         break;
15528                 case 1:
15529                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15530                         break;
15531                 case 2:
15532                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15533                         break;
15534                 case 3:
15535                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15536                         break;
15537                 }
15538         }
15539
15540         if (!tg3_flag(tp, ENABLE_ASF) &&
15541             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15542             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15543                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15544                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15545
15546         if (tg3_flag(tp, USE_PHYLIB))
15547                 return tg3_phy_init(tp);
15548
15549         /* Reading the PHY ID register can conflict with ASF
15550          * firmware access to the PHY hardware.
15551          */
15552         err = 0;
15553         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15554                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15555         } else {
15556                 /* Now read the physical PHY_ID from the chip and verify
15557                  * that it is sane.  If it doesn't look good, we fall
15558                  * back to the value found in the EEPROM area or,
15559                  * failing that, to the hard-coded subsystem table.
15560                  */
15561                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15562                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15563
15564                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15565                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15566                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15567
15568                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15569         }
15570
15571         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15572                 tp->phy_id = hw_phy_id;
15573                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15574                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15575                 else
15576                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15577         } else {
15578                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15579                         /* Do nothing, phy ID already set up in
15580                          * tg3_get_eeprom_hw_cfg().
15581                          */
15582                 } else {
15583                         struct subsys_tbl_ent *p;
15584
15585                         /* No eeprom signature?  Try the hardcoded
15586                          * subsys device table.
15587                          */
15588                         p = tg3_lookup_by_subsys(tp);
15589                         if (p) {
15590                                 tp->phy_id = p->phy_id;
15591                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15592                                 /* So far we have seen the IDs 0xbc050cd0,
15593                                  * 0xbc050f80 and 0xbc050c30 on devices
15594                                  * connected to a BCM4785, and there are
15595                                  * probably more.  For now, just assume
15596                                  * that the PHY is supported when it is
15597                                  * connected to an SSB core.
15598                                  */
15599                                 return -ENODEV;
15600                         }
15601
15602                         if (!tp->phy_id ||
15603                             tp->phy_id == TG3_PHY_ID_BCM8002)
15604                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15605                 }
15606         }
15607
15608         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15609             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15610              tg3_asic_rev(tp) == ASIC_REV_5720 ||
15611              tg3_asic_rev(tp) == ASIC_REV_57766 ||
15612              tg3_asic_rev(tp) == ASIC_REV_5762 ||
15613              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15614               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15615              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15616               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15617                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15618
15619                 tp->eee.supported = SUPPORTED_100baseT_Full |
15620                                     SUPPORTED_1000baseT_Full;
15621                 tp->eee.advertised = ADVERTISED_100baseT_Full |
15622                                      ADVERTISED_1000baseT_Full;
15623                 tp->eee.eee_enabled = 1;
15624                 tp->eee.tx_lpi_enabled = 1;
15625                 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15626         }
15627
15628         tg3_phy_init_link_config(tp);
15629
15630         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15631             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15632             !tg3_flag(tp, ENABLE_APE) &&
15633             !tg3_flag(tp, ENABLE_ASF)) {
15634                 u32 bmsr, dummy;
15635
15636                 tg3_readphy(tp, MII_BMSR, &bmsr);
15637                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15638                     (bmsr & BMSR_LSTATUS))
15639                         goto skip_phy_reset;
15640
15641                 err = tg3_phy_reset(tp);
15642                 if (err)
15643                         return err;
15644
15645                 tg3_phy_set_wirespeed(tp);
15646
15647                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15648                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15649                                             tp->link_config.flowctrl);
15650
15651                         tg3_writephy(tp, MII_BMCR,
15652                                      BMCR_ANENABLE | BMCR_ANRESTART);
15653                 }
15654         }
15655
15656 skip_phy_reset:
15657         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15658                 err = tg3_init_5401phy_dsp(tp);
15659                 if (err)
15660                         return err;
15661
15662                 err = tg3_init_5401phy_dsp(tp);
15663         }
15664
15665         return err;
15666 }
15667
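/* Pull the board part number (and, for subsystem vendor "1028", i.e.
 * Dell, a bootcode version prefix) out of the PCI VPD.  When no
 * usable VPD exists, derive a part number from the PCI device ID.
 */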
15668 static void tg3_read_vpd(struct tg3 *tp)
15669 {
15670         u8 *vpd_data;
15671         unsigned int len, vpdlen;
15672         int i;
15673
15674         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15675         if (!vpd_data)
15676                 goto out_no_vpd;
15677
15678         i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15679                                          PCI_VPD_RO_KEYWORD_MFR_ID, &len);
15680         if (i < 0)
15681                 goto partno;
15682
15683         if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15684                 goto partno;
15685
15686         i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15687                                          PCI_VPD_RO_KEYWORD_VENDOR0, &len);
15688         if (i < 0)
15689                 goto partno;
15690
15691         memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15692         snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15693
15694 partno:
15695         i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15696                                          PCI_VPD_RO_KEYWORD_PARTNO, &len);
15697         if (i < 0)
15698                 goto out_not_found;
15699
15700         if (len > TG3_BPN_SIZE)
15701                 goto out_not_found;
15702
15703         memcpy(tp->board_part_number, &vpd_data[i], len);
15704
15705 out_not_found:
15706         kfree(vpd_data);
15707         if (tp->board_part_number[0])
15708                 return;
15709
15710 out_no_vpd:
15711         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15712                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15713                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15714                         strcpy(tp->board_part_number, "BCM5717");
15715                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15716                         strcpy(tp->board_part_number, "BCM5718");
15717                 else
15718                         goto nomatch;
15719         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15720                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15721                         strcpy(tp->board_part_number, "BCM57780");
15722                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15723                         strcpy(tp->board_part_number, "BCM57760");
15724                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15725                         strcpy(tp->board_part_number, "BCM57790");
15726                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15727                         strcpy(tp->board_part_number, "BCM57788");
15728                 else
15729                         goto nomatch;
15730         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15731                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15732                         strcpy(tp->board_part_number, "BCM57761");
15733                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15734                         strcpy(tp->board_part_number, "BCM57765");
15735                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15736                         strcpy(tp->board_part_number, "BCM57781");
15737                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15738                         strcpy(tp->board_part_number, "BCM57785");
15739                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15740                         strcpy(tp->board_part_number, "BCM57791");
15741                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15742                         strcpy(tp->board_part_number, "BCM57795");
15743                 else
15744                         goto nomatch;
15745         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15746                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15747                         strcpy(tp->board_part_number, "BCM57762");
15748                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15749                         strcpy(tp->board_part_number, "BCM57766");
15750                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15751                         strcpy(tp->board_part_number, "BCM57782");
15752                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15753                         strcpy(tp->board_part_number, "BCM57786");
15754                 else
15755                         goto nomatch;
15756         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15757                 strcpy(tp->board_part_number, "BCM95906");
15758         } else {
15759 nomatch:
15760                 strcpy(tp->board_part_number, "none");
15761         }
15762 }
15763
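/* A firmware image header is considered valid when its first word
 * carries the 0x0c000000 signature in the top six bits and its
 * second word is zero.
 */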
15764 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15765 {
15766         u32 val;
15767
15768         if (tg3_nvram_read(tp, offset, &val) ||
15769             (val & 0xfc000000) != 0x0c000000 ||
15770             tg3_nvram_read(tp, offset + 4, &val) ||
15771             val != 0)
15772                 return 0;
15773
15774         return 1;
15775 }
15776
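/* Append the bootcode version to tp->fw_ver.  Newer images point at
 * a 16-byte ASCII version string inside the image; older ones only
 * provide packed major/minor fields, formatted here as "vM.mm".
 */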
15777 static void tg3_read_bc_ver(struct tg3 *tp)
15778 {
15779         u32 val, offset, start, ver_offset;
15780         int i, dst_off;
15781         bool newver = false;
15782
15783         if (tg3_nvram_read(tp, 0xc, &offset) ||
15784             tg3_nvram_read(tp, 0x4, &start))
15785                 return;
15786
15787         offset = tg3_nvram_logical_addr(tp, offset);
15788
15789         if (tg3_nvram_read(tp, offset, &val))
15790                 return;
15791
15792         if ((val & 0xfc000000) == 0x0c000000) {
15793                 if (tg3_nvram_read(tp, offset + 4, &val))
15794                         return;
15795
15796                 if (val == 0)
15797                         newver = true;
15798         }
15799
15800         dst_off = strlen(tp->fw_ver);
15801
15802         if (newver) {
15803                 if (TG3_VER_SIZE - dst_off < 16 ||
15804                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15805                         return;
15806
15807                 offset = offset + ver_offset - start;
15808                 for (i = 0; i < 16; i += 4) {
15809                         __be32 v;
15810                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15811                                 return;
15812
15813                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15814                 }
15815         } else {
15816                 u32 major, minor;
15817
15818                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15819                         return;
15820
15821                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15822                         TG3_NVM_BCVER_MAJSFT;
15823                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15824                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15825                          "v%d.%02d", major, minor);
15826         }
15827 }
15828
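/* Hardware selfboot: the major/minor version fields are packed into
 * a single NVM config word.
 */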
15829 static void tg3_read_hwsb_ver(struct tg3 *tp)
15830 {
15831         u32 val, major, minor;
15832
15833         /* Use native endian representation */
15834         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15835                 return;
15836
15837         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15838                 TG3_NVM_HWSB_CFG1_MAJSFT;
15839         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15840                 TG3_NVM_HWSB_CFG1_MINSFT;
15841
15842         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15843 }
15844
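/* Selfboot format 1: the location of the version word depends on the
 * image revision.  A non-zero build number is appended as a letter
 * suffix ('a' for build 1, 'b' for build 2, ...).
 */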
15845 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15846 {
15847         u32 offset, major, minor, build;
15848
15849         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15850
15851         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15852                 return;
15853
15854         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15855         case TG3_EEPROM_SB_REVISION_0:
15856                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15857                 break;
15858         case TG3_EEPROM_SB_REVISION_2:
15859                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15860                 break;
15861         case TG3_EEPROM_SB_REVISION_3:
15862                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15863                 break;
15864         case TG3_EEPROM_SB_REVISION_4:
15865                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15866                 break;
15867         case TG3_EEPROM_SB_REVISION_5:
15868                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15869                 break;
15870         case TG3_EEPROM_SB_REVISION_6:
15871                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15872                 break;
15873         default:
15874                 return;
15875         }
15876
15877         if (tg3_nvram_read(tp, offset, &val))
15878                 return;
15879
15880         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15881                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15882         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15883                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15884         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15885
15886         if (minor > 99 || build > 26)
15887                 return;
15888
15889         offset = strlen(tp->fw_ver);
15890         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15891                  " v%d.%02d", major, minor);
15892
15893         if (build > 0) {
15894                 offset = strlen(tp->fw_ver);
15895                 if (offset < TG3_VER_SIZE - 1)
15896                         tp->fw_ver[offset] = 'a' + build - 1;
15897         }
15898 }
15899
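/* Walk the NVM directory looking for an ASF init entry and, if the
 * firmware image it references looks valid, append that image's
 * 16-byte version string to tp->fw_ver.
 */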
15900 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15901 {
15902         u32 val, offset, start;
15903         int i, vlen;
15904
15905         for (offset = TG3_NVM_DIR_START;
15906              offset < TG3_NVM_DIR_END;
15907              offset += TG3_NVM_DIRENT_SIZE) {
15908                 if (tg3_nvram_read(tp, offset, &val))
15909                         return;
15910
15911                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15912                         break;
15913         }
15914
15915         if (offset == TG3_NVM_DIR_END)
15916                 return;
15917
15918         if (!tg3_flag(tp, 5705_PLUS))
15919                 start = 0x08000000;
15920         else if (tg3_nvram_read(tp, offset - 4, &start))
15921                 return;
15922
15923         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15924             !tg3_fw_img_is_valid(tp, offset) ||
15925             tg3_nvram_read(tp, offset + 8, &val))
15926                 return;
15927
15928         offset += val - start;
15929
15930         vlen = strlen(tp->fw_ver);
15931
15932         tp->fw_ver[vlen++] = ',';
15933         tp->fw_ver[vlen++] = ' ';
15934
15935         for (i = 0; i < 4; i++) {
15936                 __be32 v;
15937                 if (tg3_nvram_read_be32(tp, offset, &v))
15938                         return;
15939
15940                 offset += sizeof(v);
15941
15942                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15943                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15944                         break;
15945                 }
15946
15947                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15948                 vlen += sizeof(v);
15949         }
15950 }
15951
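/* Detect NC-SI capable APE firmware: the APE segment signature must
 * match, the firmware must report ready, and the NCSI feature bit
 * must be set.
 */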
15952 static void tg3_probe_ncsi(struct tg3 *tp)
15953 {
15954         u32 apedata;
15955
15956         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15957         if (apedata != APE_SEG_SIG_MAGIC)
15958                 return;
15959
15960         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15961         if (!(apedata & APE_FW_STATUS_READY))
15962                 return;
15963
15964         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15965                 tg3_flag_set(tp, APE_HAS_NCSI);
15966 }
15967
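/* Append the APE firmware version, labeled NCSI, SMASH (5725 parts)
 * or DASH depending on the firmware flavor.
 */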
15968 static void tg3_read_dash_ver(struct tg3 *tp)
15969 {
15970         int vlen;
15971         u32 apedata;
15972         char *fwtype;
15973
15974         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15975
15976         if (tg3_flag(tp, APE_HAS_NCSI))
15977                 fwtype = "NCSI";
15978         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15979                 fwtype = "SMASH";
15980         else
15981                 fwtype = "DASH";
15982
15983         vlen = strlen(tp->fw_ver);
15984
15985         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15986                  fwtype,
15987                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15988                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15989                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15990                  (apedata & APE_FW_VERSION_BLDMSK));
15991 }
15992
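/* 5762 only: a firmware patch level may live in OTP.  The 64-bit
 * magic block is scanned from the low byte upward, and the last
 * non-zero byte found is appended as " .NN".
 */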
15993 static void tg3_read_otp_ver(struct tg3 *tp)
15994 {
15995         u32 val, val2;
15996
15997         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15998                 return;
15999
16000         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
16001             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
16002             TG3_OTP_MAGIC0_VALID(val)) {
16003                 u64 val64 = (u64) val << 32 | val2;
16004                 u32 ver = 0;
16005                 int i, vlen;
16006
16007                 for (i = 0; i < 7; i++) {
16008                         if ((val64 & 0xff) == 0)
16009                                 break;
16010                         ver = val64 & 0xff;
16011                         val64 >>= 8;
16012                 }
16013                 vlen = strlen(tp->fw_ver);
16014                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16015         }
16016 }
16017
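/* Top-level firmware version discovery: dispatch on the NVRAM magic
 * number to the bootcode, selfboot or hardware-selfboot readers,
 * then append any management firmware (ASF or APE) version.
 */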
16018 static void tg3_read_fw_ver(struct tg3 *tp)
16019 {
16020         u32 val;
16021         bool vpd_vers = false;
16022
16023         if (tp->fw_ver[0] != 0)
16024                 vpd_vers = true;
16025
16026         if (tg3_flag(tp, NO_NVRAM)) {
16027                 strcat(tp->fw_ver, "sb");
16028                 tg3_read_otp_ver(tp);
16029                 return;
16030         }
16031
16032         if (tg3_nvram_read(tp, 0, &val))
16033                 return;
16034
16035         if (val == TG3_EEPROM_MAGIC)
16036                 tg3_read_bc_ver(tp);
16037         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16038                 tg3_read_sb_ver(tp, val);
16039         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16040                 tg3_read_hwsb_ver(tp);
16041
16042         if (tg3_flag(tp, ENABLE_ASF)) {
16043                 if (tg3_flag(tp, ENABLE_APE)) {
16044                         tg3_probe_ncsi(tp);
16045                         if (!vpd_vers)
16046                                 tg3_read_dash_ver(tp);
16047                 } else if (!vpd_vers) {
16048                         tg3_read_mgmtfw_ver(tp);
16049                 }
16050         }
16051
16052         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16053 }
16054
16055 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16056 {
16057         if (tg3_flag(tp, LRG_PROD_RING_CAP))
16058                 return TG3_RX_RET_MAX_SIZE_5717;
16059         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16060                 return TG3_RX_RET_MAX_SIZE_5700;
16061         else
16062                 return TG3_RX_RET_MAX_SIZE_5705;
16063 }
16064
16065 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16066         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16067         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16068         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16069         { },
16070 };
16071
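/* On dual-port devices the mate shares our PCI slot; scan the other
 * functions of the same devfn to find it.
 */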
16072 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16073 {
16074         struct pci_dev *peer;
16075         unsigned int func, devnr = tp->pdev->devfn & ~7;
16076
16077         for (func = 0; func < 8; func++) {
16078                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16079                 if (peer && peer != tp->pdev)
16080                         break;
16081                 pci_dev_put(peer);
16082         }
16083         /* The 5704 can be configured in single-port mode; set peer to
16084          * tp->pdev in that case.
16085          */
16086         if (!peer) {
16087                 peer = tp->pdev;
16088                 return peer;
16089         }
16090
16091         /*
16092          * We don't need to keep the refcount elevated; there's no way
16093          * to remove one half of this device without removing the other.
16094          */
16095         pci_dev_put(peer);
16096
16097         return peer;
16098 }
16099
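/* Determine the chip revision ID.  Newer devices report
 * ASIC_REV_USE_PROD_ID_REG in MISC_HOST_CTRL and expose the real
 * revision via a product-ID config register; the family flags
 * (5717_PLUS, 57765_CLASS, etc.) are then derived from the decoded
 * ASIC revision.
 */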
16100 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16101 {
16102         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16103         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16104                 u32 reg;
16105
16106                 /* All devices that use the alternate
16107                  * ASIC REV location have a CPMU.
16108                  */
16109                 tg3_flag_set(tp, CPMU_PRESENT);
16110
16111                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16112                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16113                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16114                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16115                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16116                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16117                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16118                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16119                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16120                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16121                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16122                         reg = TG3PCI_GEN2_PRODID_ASICREV;
16123                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16124                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16125                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16126                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16127                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16128                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16129                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16130                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16131                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16132                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16133                         reg = TG3PCI_GEN15_PRODID_ASICREV;
16134                 else
16135                         reg = TG3PCI_PRODID_ASICREV;
16136
16137                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16138         }
16139
16140         /* Wrong chip ID in 5752 A0. This code can be removed later
16141          * as A0 is not in production.
16142          */
16143         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16144                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16145
16146         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16147                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16148
16149         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16150             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16151             tg3_asic_rev(tp) == ASIC_REV_5720)
16152                 tg3_flag_set(tp, 5717_PLUS);
16153
16154         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16155             tg3_asic_rev(tp) == ASIC_REV_57766)
16156                 tg3_flag_set(tp, 57765_CLASS);
16157
16158         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16159              tg3_asic_rev(tp) == ASIC_REV_5762)
16160                 tg3_flag_set(tp, 57765_PLUS);
16161
16162         /* Intentionally exclude ASIC_REV_5906 */
16163         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16164             tg3_asic_rev(tp) == ASIC_REV_5787 ||
16165             tg3_asic_rev(tp) == ASIC_REV_5784 ||
16166             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16167             tg3_asic_rev(tp) == ASIC_REV_5785 ||
16168             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16169             tg3_flag(tp, 57765_PLUS))
16170                 tg3_flag_set(tp, 5755_PLUS);
16171
16172         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16173             tg3_asic_rev(tp) == ASIC_REV_5714)
16174                 tg3_flag_set(tp, 5780_CLASS);
16175
16176         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16177             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16178             tg3_asic_rev(tp) == ASIC_REV_5906 ||
16179             tg3_flag(tp, 5755_PLUS) ||
16180             tg3_flag(tp, 5780_CLASS))
16181                 tg3_flag_set(tp, 5750_PLUS);
16182
16183         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16184             tg3_flag(tp, 5750_PLUS))
16185                 tg3_flag_set(tp, 5705_PLUS);
16186 }
16187
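/* 10/100-only hardware: certain 5703 board IDs, anything with a FET
 * PHY, and devices explicitly flagged in the PCI ID table.
 */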
16188 static bool tg3_10_100_only_device(struct tg3 *tp,
16189                                    const struct pci_device_id *ent)
16190 {
16191         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16192
16193         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16194              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16195             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16196                 return true;
16197
16198         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16199                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16200                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16201                                 return true;
16202                 } else {
16203                         return true;
16204                 }
16205         }
16206
16207         return false;
16208 }
16209
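/* Probe-time hardware characterization: decode the chip and bus
 * configuration and arm the per-chipset bug workarounds before the
 * first fast-path MMIO access.
 */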
16210 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16211 {
16212         u32 misc_ctrl_reg;
16213         u32 pci_state_reg, grc_misc_cfg;
16214         u32 val;
16215         u16 pci_cmd;
16216         int err;
16217
16218         /* Force memory write invalidate off.  If we leave it on,
16219          * then on 5700_BX chips we have to enable a workaround.
16220          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16221          * to match the cacheline size.  The Broadcom driver has this
16222          * workaround but turns MWI off all the time, so it never uses
16223          * it.  This seems to suggest that the workaround is insufficient.
16224          */
16225         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16226         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16227         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16228
16229         /* Important! -- Make sure register accesses are byteswapped
16230          * correctly.  Also, for those chips that require it, make
16231          * sure that indirect register accesses are enabled before
16232          * the first operation.
16233          */
16234         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16235                               &misc_ctrl_reg);
16236         tp->misc_host_ctrl |= (misc_ctrl_reg &
16237                                MISC_HOST_CTRL_CHIPREV);
16238         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16239                                tp->misc_host_ctrl);
16240
16241         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16242
16243         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16244          * we need to disable memory and use config. cycles
16245          * only to access all registers. The 5702/03 chips
16246          * can mistakenly decode the special cycles from the
16247          * ICH chipsets as memory write cycles, causing corruption
16248          * of register and memory space. Only certain ICH bridges
16249          * will drive special cycles with non-zero data during the
16250          * address phase which can fall within the 5703's address
16251          * range. This is not an ICH bug as the PCI spec allows
16252          * non-zero address during special cycles. However, only
16253          * these ICH bridges are known to drive non-zero addresses
16254          * during special cycles.
16255          *
16256          * Since special cycles do not cross PCI bridges, we only
16257          * enable this workaround if the 5703 is on the secondary
16258          * bus of these ICH bridges.
16259          */
16260         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16261             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16262                 static struct tg3_dev_id {
16263                         u32     vendor;
16264                         u32     device;
16265                         u32     rev;
16266                 } ich_chipsets[] = {
16267                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16268                           PCI_ANY_ID },
16269                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16270                           PCI_ANY_ID },
16271                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16272                           0xa },
16273                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16274                           PCI_ANY_ID },
16275                         { },
16276                 };
16277                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16278                 struct pci_dev *bridge = NULL;
16279
16280                 while (pci_id->vendor != 0) {
16281                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16282                                                 bridge);
16283                         if (!bridge) {
16284                                 pci_id++;
16285                                 continue;
16286                         }
16287                         if (pci_id->rev != PCI_ANY_ID) {
16288                                 if (bridge->revision > pci_id->rev)
16289                                         continue;
16290                         }
16291                         if (bridge->subordinate &&
16292                             (bridge->subordinate->number ==
16293                              tp->pdev->bus->number)) {
16294                                 tg3_flag_set(tp, ICH_WORKAROUND);
16295                                 pci_dev_put(bridge);
16296                                 break;
16297                         }
16298                 }
16299         }
16300
16301         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16302                 static struct tg3_dev_id {
16303                         u32     vendor;
16304                         u32     device;
16305                 } bridge_chipsets[] = {
16306                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16307                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16308                         { },
16309                 };
16310                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16311                 struct pci_dev *bridge = NULL;
16312
16313                 while (pci_id->vendor != 0) {
16314                         bridge = pci_get_device(pci_id->vendor,
16315                                                 pci_id->device,
16316                                                 bridge);
16317                         if (!bridge) {
16318                                 pci_id++;
16319                                 continue;
16320                         }
16321                         if (bridge->subordinate &&
16322                             (bridge->subordinate->number <=
16323                              tp->pdev->bus->number) &&
16324                             (bridge->subordinate->busn_res.end >=
16325                              tp->pdev->bus->number)) {
16326                                 tg3_flag_set(tp, 5701_DMA_BUG);
16327                                 pci_dev_put(bridge);
16328                                 break;
16329                         }
16330                 }
16331         }
16332
16333         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16334          * DMA addresses > 40-bit. This bridge may have other additional
16335          * 57xx devices behind it in some 4-port NIC designs for example.
16336          * Any tg3 device found behind the bridge will also need the 40-bit
16337          * DMA workaround.
16338          */
16339         if (tg3_flag(tp, 5780_CLASS)) {
16340                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16341                 tp->msi_cap = tp->pdev->msi_cap;
16342         } else {
16343                 struct pci_dev *bridge = NULL;
16344
16345                 do {
16346                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16347                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16348                                                 bridge);
16349                         if (bridge && bridge->subordinate &&
16350                             (bridge->subordinate->number <=
16351                              tp->pdev->bus->number) &&
16352                             (bridge->subordinate->busn_res.end >=
16353                              tp->pdev->bus->number)) {
16354                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16355                                 pci_dev_put(bridge);
16356                                 break;
16357                         }
16358                 } while (bridge);
16359         }
16360
16361         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16362             tg3_asic_rev(tp) == ASIC_REV_5714)
16363                 tp->pdev_peer = tg3_find_peer(tp);
16364
16365         /* Determine TSO capabilities */
16366         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16367                 ; /* Do nothing. HW bug. */
16368         else if (tg3_flag(tp, 57765_PLUS))
16369                 tg3_flag_set(tp, HW_TSO_3);
16370         else if (tg3_flag(tp, 5755_PLUS) ||
16371                  tg3_asic_rev(tp) == ASIC_REV_5906)
16372                 tg3_flag_set(tp, HW_TSO_2);
16373         else if (tg3_flag(tp, 5750_PLUS)) {
16374                 tg3_flag_set(tp, HW_TSO_1);
16375                 tg3_flag_set(tp, TSO_BUG);
16376                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16377                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16378                         tg3_flag_clear(tp, TSO_BUG);
16379         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16380                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16381                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16382                 tg3_flag_set(tp, FW_TSO);
16383                 tg3_flag_set(tp, TSO_BUG);
16384                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16385                         tp->fw_needed = FIRMWARE_TG3TSO5;
16386                 else
16387                         tp->fw_needed = FIRMWARE_TG3TSO;
16388         }
16389
16390         /* Selectively allow TSO based on operating conditions */
16391         if (tg3_flag(tp, HW_TSO_1) ||
16392             tg3_flag(tp, HW_TSO_2) ||
16393             tg3_flag(tp, HW_TSO_3) ||
16394             tg3_flag(tp, FW_TSO)) {
16395                 /* For firmware TSO, assume ASF is disabled.
16396                  * We'll disable TSO later if we discover ASF
16397                  * is enabled in tg3_get_eeprom_hw_cfg().
16398                  */
16399                 tg3_flag_set(tp, TSO_CAPABLE);
16400         } else {
16401                 tg3_flag_clear(tp, TSO_CAPABLE);
16402                 tg3_flag_clear(tp, TSO_BUG);
16403                 tp->fw_needed = NULL;
16404         }
16405
16406         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16407                 tp->fw_needed = FIRMWARE_TG3;
16408
16409         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16410                 tp->fw_needed = FIRMWARE_TG357766;
16411
16412         tp->irq_max = 1;
16413
16414         if (tg3_flag(tp, 5750_PLUS)) {
16415                 tg3_flag_set(tp, SUPPORT_MSI);
16416                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16417                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16418                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16419                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16420                      tp->pdev_peer == tp->pdev))
16421                         tg3_flag_clear(tp, SUPPORT_MSI);
16422
16423                 if (tg3_flag(tp, 5755_PLUS) ||
16424                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16425                         tg3_flag_set(tp, 1SHOT_MSI);
16426                 }
16427
16428                 if (tg3_flag(tp, 57765_PLUS)) {
16429                         tg3_flag_set(tp, SUPPORT_MSIX);
16430                         tp->irq_max = TG3_IRQ_MAX_VECS;
16431                 }
16432         }
16433
16434         tp->txq_max = 1;
16435         tp->rxq_max = 1;
16436         if (tp->irq_max > 1) {
16437                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16438                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16439
16440                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16441                     tg3_asic_rev(tp) == ASIC_REV_5720)
16442                         tp->txq_max = tp->irq_max - 1;
16443         }
16444
16445         if (tg3_flag(tp, 5755_PLUS) ||
16446             tg3_asic_rev(tp) == ASIC_REV_5906)
16447                 tg3_flag_set(tp, SHORT_DMA_BUG);
16448
16449         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16450                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16451
16452         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16453             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16454             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16455             tg3_asic_rev(tp) == ASIC_REV_5762)
16456                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16457
16458         if (tg3_flag(tp, 57765_PLUS) &&
16459             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16460                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16461
16462         if (!tg3_flag(tp, 5705_PLUS) ||
16463             tg3_flag(tp, 5780_CLASS) ||
16464             tg3_flag(tp, USE_JUMBO_BDFLAG))
16465                 tg3_flag_set(tp, JUMBO_CAPABLE);
16466
16467         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16468                               &pci_state_reg);
16469
16470         if (pci_is_pcie(tp->pdev)) {
16471                 u16 lnkctl;
16472
16473                 tg3_flag_set(tp, PCI_EXPRESS);
16474
16475                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16476                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16477                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16478                                 tg3_flag_clear(tp, HW_TSO_2);
16479                                 tg3_flag_clear(tp, TSO_CAPABLE);
16480                         }
16481                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16482                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16483                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16484                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16485                                 tg3_flag_set(tp, CLKREQ_BUG);
16486                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16487                         tg3_flag_set(tp, L1PLLPD_EN);
16488                 }
16489         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16490                 /* BCM5785 devices are effectively PCIe devices, and should
16491                  * follow PCIe codepaths, but do not have a PCIe capabilities
16492                  * section.
16493                  */
16494                 tg3_flag_set(tp, PCI_EXPRESS);
16495         } else if (!tg3_flag(tp, 5705_PLUS) ||
16496                    tg3_flag(tp, 5780_CLASS)) {
16497                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16498                 if (!tp->pcix_cap) {
16499                         dev_err(&tp->pdev->dev,
16500                                 "Cannot find PCI-X capability, aborting\n");
16501                         return -EIO;
16502                 }
16503
16504                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16505                         tg3_flag_set(tp, PCIX_MODE);
16506         }
16507
16508         /* If we have an AMD 762 or VIA K8T800 chipset, write
16509          * reordering to the mailbox registers done by the host
16510          * controller can cause major trouble.  We read back from
16511          * every mailbox register write to force the writes to be
16512          * posted to the chip in order.
16513          */
16514         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16515             !tg3_flag(tp, PCI_EXPRESS))
16516                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16517
16518         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16519                              &tp->pci_cacheline_sz);
16520         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16521                              &tp->pci_lat_timer);
16522         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16523             tp->pci_lat_timer < 64) {
16524                 tp->pci_lat_timer = 64;
16525                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16526                                       tp->pci_lat_timer);
16527         }
16528
16529         /* Important! -- It is critical that the PCI-X hw workaround
16530          * situation is decided before the first MMIO register access.
16531          */
16532         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16533                 /* 5700 BX chips need to have their TX producer index
16534                  * mailboxes written twice to workaround a bug.
16535                  * mailboxes written twice to work around a bug.
16536                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16537
16538                 /* If we are in PCI-X mode, enable the register write workaround.
16539                  *
16540                  * The workaround is to use indirect register accesses
16541                  * for all chip writes not to mailbox registers.
16542                  */
16543                 if (tg3_flag(tp, PCIX_MODE)) {
16544                         u32 pm_reg;
16545
16546                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16547
16548                         /* The chip can have its power management PCI config
16549                          * space registers clobbered due to this bug.
16550                          * So explicitly force the chip into D0 here.
16551                          */
16552                         pci_read_config_dword(tp->pdev,
16553                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16554                                               &pm_reg);
16555                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16556                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16557                         pci_write_config_dword(tp->pdev,
16558                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16559                                                pm_reg);
16560
16561                         /* Also, force SERR#/PERR# in PCI command. */
16562                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16563                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16564                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16565                 }
16566         }
16567
16568         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16569                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16570         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16571                 tg3_flag_set(tp, PCI_32BIT);
16572
16573         /* Chip-specific fixup from Broadcom driver */
16574         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16575             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16576                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16577                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16578         }
16579
16580         /* Default fast path register access methods */
16581         tp->read32 = tg3_read32;
16582         tp->write32 = tg3_write32;
16583         tp->read32_mbox = tg3_read32;
16584         tp->write32_mbox = tg3_write32;
16585         tp->write32_tx_mbox = tg3_write32;
16586         tp->write32_rx_mbox = tg3_write32;
16587
16588         /* Various workaround register access methods */
16589         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16590                 tp->write32 = tg3_write_indirect_reg32;
16591         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16592                  (tg3_flag(tp, PCI_EXPRESS) &&
16593                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16594                 /*
16595                  * Back-to-back register writes can cause problems on these
16596                  * chips; the workaround is to read back all reg writes
16597                  * except those to mailbox regs.
16598                  *
16599                  * See tg3_write_indirect_reg32().
16600                  */
16601                 tp->write32 = tg3_write_flush_reg32;
16602         }
16603
16604         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16605                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16606                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16607                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16608         }
16609
16610         if (tg3_flag(tp, ICH_WORKAROUND)) {
16611                 tp->read32 = tg3_read_indirect_reg32;
16612                 tp->write32 = tg3_write_indirect_reg32;
16613                 tp->read32_mbox = tg3_read_indirect_mbox;
16614                 tp->write32_mbox = tg3_write_indirect_mbox;
16615                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16616                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16617
16618                 iounmap(tp->regs);
16619                 tp->regs = NULL;
16620
16621                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16622                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16623                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16624         }
16625         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16626                 tp->read32_mbox = tg3_read32_mbox_5906;
16627                 tp->write32_mbox = tg3_write32_mbox_5906;
16628                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16629                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16630         }
16631
16632         if (tp->write32 == tg3_write_indirect_reg32 ||
16633             (tg3_flag(tp, PCIX_MODE) &&
16634              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16635               tg3_asic_rev(tp) == ASIC_REV_5701)))
16636                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16637
16638         /* The memory arbiter has to be enabled in order for SRAM accesses
16639          * to succeed.  Normally on powerup the tg3 chip firmware will make
16640          * sure it is enabled, but other entities such as system netboot
16641          * code might disable it.
16642          */
16643         val = tr32(MEMARB_MODE);
16644         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16645
16646         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16647         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16648             tg3_flag(tp, 5780_CLASS)) {
16649                 if (tg3_flag(tp, PCIX_MODE)) {
16650                         pci_read_config_dword(tp->pdev,
16651                                               tp->pcix_cap + PCI_X_STATUS,
16652                                               &val);
16653                         tp->pci_fn = val & 0x7;
16654                 }
16655         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16656                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16657                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16658                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16659                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16660                         val = tr32(TG3_CPMU_STATUS);
16661
16662                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16663                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16664                 else
16665                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16666                                      TG3_CPMU_STATUS_FSHFT_5719;
16667         }
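        /*
         * Summary: tp->pci_fn now holds this port's function number,
         * taken from PCI_X_STATUS on 5704/5780-class devices in PCI-X
         * mode, from the CPMU status register on 5717/5719/5720, and
         * from the PCI devfn otherwise.
         */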
16668
16669         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16670                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16671                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16672         }
16673
16674         /* Get eeprom hw config before calling tg3_set_power_state().
16675          * In particular, the TG3_FLAG_IS_NIC flag must be
16676          * determined before calling tg3_set_power_state() so that
16677          * we know whether or not to switch out of Vaux power.
16678          * When the flag is set, it means that GPIO1 is used for eeprom
16679          * write protect and also implies that it is a LOM where GPIOs
16680          * are not used to switch power.
16681          */
16682         tg3_get_eeprom_hw_cfg(tp);
16683
16684         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16685                 tg3_flag_clear(tp, TSO_CAPABLE);
16686                 tg3_flag_clear(tp, TSO_BUG);
16687                 tp->fw_needed = NULL;
16688         }
16689
16690         if (tg3_flag(tp, ENABLE_APE)) {
16691                 /* Allow reads and writes to the
16692                  * APE register and memory space.
16693                  */
16694                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16695                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16696                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16697                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16698                                        pci_state_reg);
16699
16700                 tg3_ape_lock_init(tp);
16701                 tp->ape_hb_interval =
16702                         msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16703         }
16704
16705         /* Set up tp->grc_local_ctrl before calling
16706          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16707          * will bring 5700's external PHY out of reset.
16708          * It is also used as eeprom write protect on LOMs.
16709          */
16710         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16711         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16712             tg3_flag(tp, EEPROM_WRITE_PROT))
16713                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16714                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16715         /* Unused GPIO3 must be driven as output on 5752 because there
16716          * are no pull-up resistors on unused GPIO pins.
16717          */
16718         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16719                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16720
16721         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16722             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16723             tg3_flag(tp, 57765_CLASS))
16724                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16725
16726         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16727             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16728                 /* Turn off the debug UART. */
16729                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16730                 if (tg3_flag(tp, IS_NIC))
16731                         /* Keep VMain power. */
16732                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16733                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16734         }
16735
16736         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16737                 tp->grc_local_ctrl |=
16738                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16739
16740         /* Switch out of Vaux if it is a NIC */
16741         tg3_pwrsrc_switch_to_vmain(tp);
16742
16743         /* Derive the initial jumbo mode from the MTU assigned in
16744          * ether_setup() via the alloc_etherdev() call.
16745          */
16746         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16747                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16748
16749         /* Determine WakeOnLan speed to use. */
16750         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16751             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16752             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16753             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16754                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16755         } else {
16756                 tg3_flag_set(tp, WOL_SPEED_100MB);
16757         }
16758
16759         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16760                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16761
16762         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16763         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16764             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16765              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16766              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16767             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16768             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16769                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16770
16771         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16772             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16773                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16774         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16775                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16776
16777         if (tg3_flag(tp, 5705_PLUS) &&
16778             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16779             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16780             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16781             !tg3_flag(tp, 57765_PLUS)) {
16782                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16783                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16784                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16785                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16786                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16787                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16788                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16789                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16790                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16791                 } else
16792                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16793         }
16794
16795         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16796             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16797                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16798                 if (tp->phy_otp == 0)
16799                         tp->phy_otp = TG3_OTP_DEFAULT;
16800         }
16801
16802         if (tg3_flag(tp, CPMU_PRESENT))
16803                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16804         else
16805                 tp->mi_mode = MAC_MI_MODE_BASE;
16806
16807         tp->coalesce_mode = 0;
16808         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16809             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16810                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16811
16812         /* Set these bits to enable the statistics workaround. */
16813         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16814             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16815             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16816             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16817                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16818                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16819         }
16820
16821         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16822             tg3_asic_rev(tp) == ASIC_REV_57780)
16823                 tg3_flag_set(tp, USE_PHYLIB);
16824
16825         err = tg3_mdio_init(tp);
16826         if (err)
16827                 return err;
16828
16829         /* Initialize data/descriptor byte/word swapping. */
16830         val = tr32(GRC_MODE);
16831         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16832             tg3_asic_rev(tp) == ASIC_REV_5762)
16833                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16834                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16835                         GRC_MODE_B2HRX_ENABLE |
16836                         GRC_MODE_HTX2B_ENABLE |
16837                         GRC_MODE_HOST_STACKUP);
16838         else
16839                 val &= GRC_MODE_HOST_STACKUP;
16840
16841         tw32(GRC_MODE, val | tp->grc_mode);
16842
16843         tg3_switch_clocks(tp);
16844
16845         /* Clear this out for sanity. */
16846         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16847
16848         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16849         tw32(TG3PCI_REG_BASE_ADDR, 0);
16850
16851         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16852                               &pci_state_reg);
16853         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16854             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16855                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16856                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16857                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16858                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16859                         void __iomem *sram_base;
16860
16861                         /* Write some dummy words into the SRAM status block
16862                          * area and see if they read back correctly.  If the
16863                          * readback is bad, force-enable the PCI-X workaround.
16864                          */
16865                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16866
16867                         writel(0x00000000, sram_base);
16868                         writel(0x00000000, sram_base + 4);
16869                         writel(0xffffffff, sram_base + 4);
16870                         if (readl(sram_base) != 0x00000000)
16871                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16872                 }
16873         }
16874
16875         udelay(50);
16876         tg3_nvram_init(tp);
16877
16878         /* If the device has an NVRAM, no need to load patch firmware */
16879         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16880             !tg3_flag(tp, NO_NVRAM))
16881                 tp->fw_needed = NULL;
16882
16883         grc_misc_cfg = tr32(GRC_MISC_CFG);
16884         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16885
16886         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16887             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16888              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16889                 tg3_flag_set(tp, IS_5788);
16890
16891         if (!tg3_flag(tp, IS_5788) &&
16892             tg3_asic_rev(tp) != ASIC_REV_5700)
16893                 tg3_flag_set(tp, TAGGED_STATUS);
16894         if (tg3_flag(tp, TAGGED_STATUS)) {
16895                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16896                                       HOSTCC_MODE_CLRTICK_TXBD);
16897
16898                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16899                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16900                                        tp->misc_host_ctrl);
16901         }
16902
16903         /* Preserve the APE MAC_MODE bits */
16904         if (tg3_flag(tp, ENABLE_APE))
16905                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16906         else
16907                 tp->mac_mode = 0;
16908
16909         if (tg3_10_100_only_device(tp, ent))
16910                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16911
16912         err = tg3_phy_probe(tp);
16913         if (err) {
16914                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16915                 /* ... but do not return immediately ... */
16916                 tg3_mdio_fini(tp);
16917         }
16918
16919         tg3_read_vpd(tp);
16920         tg3_read_fw_ver(tp);
16921
16922         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16923                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16924         } else {
16925                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16926                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16927                 else
16928                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16929         }
16930
16931         /* 5700 {AX,BX} chips have a broken status block link
16932          * change bit implementation, so we must use the
16933          * status register in those cases.
16934          */
16935         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16936                 tg3_flag_set(tp, USE_LINKCHG_REG);
16937         else
16938                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16939
16940         /* The led_ctrl is set during tg3_phy_probe; here we might
16941          * have to force the link status polling mechanism based
16942          * upon subsystem IDs.
16943          */
16944         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16945             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16946             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16947                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16948                 tg3_flag_set(tp, USE_LINKCHG_REG);
16949         }
16950
16951         /* For all SERDES we poll the MAC status register. */
16952         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16953                 tg3_flag_set(tp, POLL_SERDES);
16954         else
16955                 tg3_flag_clear(tp, POLL_SERDES);
16956
16957         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16958                 tg3_flag_set(tp, POLL_CPMU_LINK);
16959
16960         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16961         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16962         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16963             tg3_flag(tp, PCIX_MODE)) {
16964                 tp->rx_offset = NET_SKB_PAD;
16965 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16966                 tp->rx_copy_thresh = ~(u16)0;
16967 #endif
16968         }
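        /*
         * Editorial note: the adjustment above works around an rx DMA
         * alignment limitation of the 5701 in PCI-X mode.  The usual
         * 2-byte NET_IP_ALIGN offset is dropped, and on architectures
         * without efficient unaligned access the copy threshold is maxed
         * out so that every received packet is copied into a properly
         * aligned buffer.
         */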
16969
16970         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16971         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16972         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16973
16974         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16975
16976         /* Increment the rx prod index on the rx std ring by at most
16977          * 8 for these chips to work around hw errata.
16978          */
16979         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16980             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16981             tg3_asic_rev(tp) == ASIC_REV_5755)
16982                 tp->rx_std_max_post = 8;
16983
16984         if (tg3_flag(tp, ASPM_WORKAROUND))
16985                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16986                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16987
16988         return err;
16989 }
16990
16991 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
16992 {
16993         u32 hi, lo, mac_offset;
16994         int addr_ok = 0;
16995         int err;
16996
16997         if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
16998                 return 0;
16999
17000         if (tg3_flag(tp, IS_SSB_CORE)) {
17001                 err = ssb_gige_get_macaddr(tp->pdev, addr);
17002                 if (!err && is_valid_ether_addr(addr))
17003                         return 0;
17004         }
17005
17006         mac_offset = 0x7c;
17007         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17008             tg3_flag(tp, 5780_CLASS)) {
17009                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17010                         mac_offset = 0xcc;
17011                 if (tg3_nvram_lock(tp))
17012                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17013                 else
17014                         tg3_nvram_unlock(tp);
17015         } else if (tg3_flag(tp, 5717_PLUS)) {
17016                 if (tp->pci_fn & 1)
17017                         mac_offset = 0xcc;
17018                 if (tp->pci_fn > 1)
17019                         mac_offset += 0x18c;
17020         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17021                 mac_offset = 0x10;
17022
17023         /* First try to get it from MAC address mailbox. */
17024         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
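        /* A valid entry is tagged with ASCII "HK" (0x484b) in the upper half. */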
17025         if ((hi >> 16) == 0x484b) {
17026                 addr[0] = (hi >>  8) & 0xff;
17027                 addr[1] = (hi >>  0) & 0xff;
17028
17029                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17030                 addr[2] = (lo >> 24) & 0xff;
17031                 addr[3] = (lo >> 16) & 0xff;
17032                 addr[4] = (lo >>  8) & 0xff;
17033                 addr[5] = (lo >>  0) & 0xff;
17034
17035                 /* Some old bootcode may report a 0 MAC address in SRAM */
17036                 addr_ok = is_valid_ether_addr(addr);
17037         }
17038         if (!addr_ok) {
17039                 /* Next, try NVRAM. */
17040                 if (!tg3_flag(tp, NO_NVRAM) &&
17041                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17042                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17043                         memcpy(&addr[0], ((char *)&hi) + 2, 2);
17044                         memcpy(&addr[2], (char *)&lo, sizeof(lo));
17045                 }
17046                 /* Finally just fetch it out of the MAC control regs. */
17047                 else {
17048                         hi = tr32(MAC_ADDR_0_HIGH);
17049                         lo = tr32(MAC_ADDR_0_LOW);
17050
17051                         addr[5] = lo & 0xff;
17052                         addr[4] = (lo >> 8) & 0xff;
17053                         addr[3] = (lo >> 16) & 0xff;
17054                         addr[2] = (lo >> 24) & 0xff;
17055                         addr[1] = hi & 0xff;
17056                         addr[0] = (hi >> 8) & 0xff;
17057                 }
17058         }
17059
17060         if (!is_valid_ether_addr(addr))
17061                 return -EINVAL;
17062         return 0;
17063 }
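/*
 * Note on the lookup order above: platform firmware, the SSB core (for
 * SoC-embedded devices), the SRAM mailbox, NVRAM, and finally the
 * MAC_ADDR_0 hardware registers.
 */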
17064
17065 #define BOUNDARY_SINGLE_CACHELINE       1
17066 #define BOUNDARY_MULTI_CACHELINE        2
17067
17068 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17069 {
17070         int cacheline_size;
17071         u8 byte;
17072         int goal;
17073
17074         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17075         if (byte == 0)
17076                 cacheline_size = 1024;
17077         else
17078                 cacheline_size = (int) byte * 4;
17079
17080         /* On 5703 and later chips, the boundary bits have no
17081          * effect.
17082          */
17083         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17084             tg3_asic_rev(tp) != ASIC_REV_5701 &&
17085             !tg3_flag(tp, PCI_EXPRESS))
17086                 goto out;
17087
17088 #if defined(CONFIG_PPC64) || defined(CONFIG_PARISC)
17089         goal = BOUNDARY_MULTI_CACHELINE;
17090 #elif defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17091         goal = BOUNDARY_SINGLE_CACHELINE;
17092 #else
17093         goal = 0;
17094 #endif
17097
17098         if (tg3_flag(tp, 57765_PLUS)) {
17099                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17100                 goto out;
17101         }
17102
17103         if (!goal)
17104                 goto out;
17105
17106         /* PCI controllers on most RISC systems tend to disconnect
17107          * when a device tries to burst across a cache-line boundary.
17108          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17109          *
17110          * Unfortunately, for PCI-E there are only limited
17111          * write-side controls for this, and thus for reads
17112          * we will still get the disconnects.  We'll also waste
17113          * these PCI cycles for both read and write on chips
17114          * other than the 5700 and 5701, which do not implement
17115          * the boundary bits.
17116          */
17117         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17118                 switch (cacheline_size) {
17119                 case 16:
17120                 case 32:
17121                 case 64:
17122                 case 128:
17123                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17124                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17125                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17126                         } else {
17127                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17128                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17129                         }
17130                         break;
17131
17132                 case 256:
17133                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17134                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17135                         break;
17136
17137                 default:
17138                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17139                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17140                         break;
17141                 }
17142         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17143                 switch (cacheline_size) {
17144                 case 16:
17145                 case 32:
17146                 case 64:
17147                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17148                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17149                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17150                                 break;
17151                         }
17152                         fallthrough;
17153                 case 128:
17154                 default:
17155                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17156                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17157                         break;
17158                 }
17159         } else {
17160                 switch (cacheline_size) {
17161                 case 16:
17162                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17163                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17164                                         DMA_RWCTRL_WRITE_BNDRY_16);
17165                                 break;
17166                         }
17167                         fallthrough;
17168                 case 32:
17169                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17170                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17171                                         DMA_RWCTRL_WRITE_BNDRY_32);
17172                                 break;
17173                         }
17174                         fallthrough;
17175                 case 64:
17176                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17177                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17178                                         DMA_RWCTRL_WRITE_BNDRY_64);
17179                                 break;
17180                         }
17181                         fallthrough;
17182                 case 128:
17183                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17184                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17185                                         DMA_RWCTRL_WRITE_BNDRY_128);
17186                                 break;
17187                         }
17188                         fallthrough;
17189                 case 256:
17190                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17191                                 DMA_RWCTRL_WRITE_BNDRY_256);
17192                         break;
17193                 case 512:
17194                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17195                                 DMA_RWCTRL_WRITE_BNDRY_512);
17196                         break;
17197                 case 1024:
17198                 default:
17199                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17200                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17201                         break;
17202                 }
17203         }
17204
17205 out:
17206         return val;
17207 }
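/*
 * Worked example (illustrative): PCI_CACHE_LINE_SIZE is in dword units,
 * so a readback of 0x10 means a 64-byte cache line.  With the
 * single-cacheline goal, a 64-byte line selects the 128-byte boundary
 * values in the PCI-X branch above, but the 64-byte boundary values in
 * the plain-PCI branch.
 */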
17208
17209 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17210                            int size, bool to_device)
17211 {
17212         struct tg3_internal_buffer_desc test_desc;
17213         u32 sram_dma_descs;
17214         int i, ret;
17215
17216         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17217
17218         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17219         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17220         tw32(RDMAC_STATUS, 0);
17221         tw32(WDMAC_STATUS, 0);
17222
17223         tw32(BUFMGR_MODE, 0);
17224         tw32(FTQ_RESET, 0);
17225
17226         test_desc.addr_hi = ((u64) buf_dma) >> 32;
17227         test_desc.addr_lo = buf_dma & 0xffffffff;
17228         test_desc.nic_mbuf = 0x00002100;
17229         test_desc.len = size;
17230
17231         /*
17232          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17233          * the *second* time the tg3 driver was loaded after an
17234          * initial scan.
17235          *
17236          * Broadcom tells me:
17237          *   ...the DMA engine is connected to the GRC block and a DMA
17238          *   reset may affect the GRC block in some unpredictable way...
17239          *   The behavior of resets to individual blocks has not been tested.
17240          *
17241          * Broadcom noted the GRC reset will also reset all sub-components.
17242          */
17243         if (to_device) {
17244                 test_desc.cqid_sqid = (13 << 8) | 2;
17245
17246                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17247                 udelay(40);
17248         } else {
17249                 test_desc.cqid_sqid = (16 << 8) | 7;
17250
17251                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17252                 udelay(40);
17253         }
17254         test_desc.flags = 0x00000005;
17255
17256         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17257                 u32 val;
17258
17259                 val = *(((u32 *)&test_desc) + i);
17260                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17261                                        sram_dma_descs + (i * sizeof(u32)));
17262                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17263         }
17264         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17265
17266         if (to_device)
17267                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17268         else
17269                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17270
17271         ret = -ENODEV;
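        /* Poll for the descriptor to show up in the completion FIFO:
         * up to 40 iterations of 100us each, roughly 4ms total.
         */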
17272         for (i = 0; i < 40; i++) {
17273                 u32 val;
17274
17275                 if (to_device)
17276                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17277                 else
17278                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17279                 if ((val & 0xffff) == sram_dma_descs) {
17280                         ret = 0;
17281                         break;
17282                 }
17283
17284                 udelay(100);
17285         }
17286
17287         return ret;
17288 }
17289
17290 #define TEST_BUFFER_SIZE        0x2000
17291
17292 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17293         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17294         { },
17295 };
17296
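/*
 * Tune TG3PCI_DMA_RW_CTRL for the bus type and then, on 5700/5701 only,
 * run a loopback DMA test against a small coherent buffer: DMA it to
 * the chip, DMA it back, and verify the contents.  On corruption, fall
 * back to a conservative 16-byte write boundary.
 */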
17297 static int tg3_test_dma(struct tg3 *tp)
17298 {
17299         dma_addr_t buf_dma;
17300         u32 *buf, saved_dma_rwctrl;
17301         int ret = 0;
17302
17303         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17304                                  &buf_dma, GFP_KERNEL);
17305         if (!buf) {
17306                 ret = -ENOMEM;
17307                 goto out_nofree;
17308         }
17309
17310         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17311                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17312
17313         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17314
17315         if (tg3_flag(tp, 57765_PLUS))
17316                 goto out;
17317
17318         if (tg3_flag(tp, PCI_EXPRESS)) {
17319                 /* DMA read watermark not used on PCIE */
17320                 tp->dma_rwctrl |= 0x00180000;
17321         } else if (!tg3_flag(tp, PCIX_MODE)) {
17322                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17323                     tg3_asic_rev(tp) == ASIC_REV_5750)
17324                         tp->dma_rwctrl |= 0x003f0000;
17325                 else
17326                         tp->dma_rwctrl |= 0x003f000f;
17327         } else {
17328                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17329                     tg3_asic_rev(tp) == ASIC_REV_5704) {
17330                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17331                         u32 read_water = 0x7;
17332
17333                         /* If the 5704 is behind the EPB bridge, we can
17334                          * do the less restrictive ONE_DMA workaround for
17335                          * better performance.
17336                          */
17337                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17338                             tg3_asic_rev(tp) == ASIC_REV_5704)
17339                                 tp->dma_rwctrl |= 0x8000;
17340                         else if (ccval == 0x6 || ccval == 0x7)
17341                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17342
17343                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
17344                                 read_water = 4;
17345                         /* Set bit 23 to enable PCIX hw bug fix */
17346                         tp->dma_rwctrl |=
17347                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17348                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17349                                 (1 << 23);
17350                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17351                         /* 5780 always in PCIX mode */
17352                         tp->dma_rwctrl |= 0x00144000;
17353                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17354                         /* 5714 always in PCIX mode */
17355                         tp->dma_rwctrl |= 0x00148000;
17356                 } else {
17357                         tp->dma_rwctrl |= 0x001b000f;
17358                 }
17359         }
17360         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17361                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17362
17363         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17364             tg3_asic_rev(tp) == ASIC_REV_5704)
17365                 tp->dma_rwctrl &= 0xfffffff0;
17366
17367         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17368             tg3_asic_rev(tp) == ASIC_REV_5701) {
17369                 /* Remove this if it causes problems for some boards. */
17370                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17371
17372                 /* On 5700/5701 chips, we need to set this bit.
17373                  * Otherwise the chip will issue cacheline transactions
17374                  * to streamable DMA memory without all of the byte
17375                  * enables turned on.  This is an error on several
17376                  * RISC PCI controllers, in particular sparc64.
17377                  *
17378                  * On 5703/5704 chips, this bit has been reassigned
17379                  * a different meaning.  In particular, it is used
17380                  * on those chips to enable a PCI-X workaround.
17381                  */
17382                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17383         }
17384
17385         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17386
17388         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17389             tg3_asic_rev(tp) != ASIC_REV_5701)
17390                 goto out;
17391
17392         /* It is best to perform the DMA test with maximum write burst size
17393          * to expose the 5700/5701 write DMA bug.
17394          */
17395         saved_dma_rwctrl = tp->dma_rwctrl;
17396         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17397         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17398
17399         while (1) {
17400                 u32 *p = buf, i;
17401
17402                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17403                         p[i] = i;
17404
17405                 /* Send the buffer to the chip. */
17406                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17407                 if (ret) {
17408                         dev_err(&tp->pdev->dev,
17409                                 "%s: Buffer write failed. err = %d\n",
17410                                 __func__, ret);
17411                         break;
17412                 }
17413
17414                 /* Now read it back. */
17415                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17416                 if (ret) {
17417                         dev_err(&tp->pdev->dev,
17418                                 "%s: Buffer read failed. err = %d\n", __func__, ret);
17419                         break;
17420                 }
17421
17422                 /* Verify it. */
17423                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17424                         if (p[i] == i)
17425                                 continue;
17426
17427                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17428                             DMA_RWCTRL_WRITE_BNDRY_16) {
17429                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17430                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17431                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17432                                 break;
17433                         } else {
17434                                 dev_err(&tp->pdev->dev,
17435                                         "%s: Buffer corrupted on read back! (%d != %d)\n",
17436                                         __func__, p[i], i);
17437                                 ret = -ENODEV;
17438                                 goto out;
17439                         }
17440                 }
17441
17442                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17443                         /* Success. */
17444                         ret = 0;
17445                         break;
17446                 }
17447         }
17448         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17449             DMA_RWCTRL_WRITE_BNDRY_16) {
17450                 /* DMA test passed without adjusting the DMA boundary;
17451                  * now look for chipsets that are known to expose the
17452                  * DMA bug without failing the test.
17453                  */
17454                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17455                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17456                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17457                 } else {
17458                         /* Safe to use the calculated DMA boundary. */
17459                         tp->dma_rwctrl = saved_dma_rwctrl;
17460                 }
17461
17462                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17463         }
17464
17465 out:
17466         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17467 out_nofree:
17468         return ret;
17469 }
17470
17471 static void tg3_init_bufmgr_config(struct tg3 *tp)
17472 {
17473         if (tg3_flag(tp, 57765_PLUS)) {
17474                 tp->bufmgr_config.mbuf_read_dma_low_water =
17475                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17476                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17477                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17478                 tp->bufmgr_config.mbuf_high_water =
17479                         DEFAULT_MB_HIGH_WATER_57765;
17480
17481                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17482                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17483                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17484                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17485                 tp->bufmgr_config.mbuf_high_water_jumbo =
17486                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17487         } else if (tg3_flag(tp, 5705_PLUS)) {
17488                 tp->bufmgr_config.mbuf_read_dma_low_water =
17489                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17490                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17491                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17492                 tp->bufmgr_config.mbuf_high_water =
17493                         DEFAULT_MB_HIGH_WATER_5705;
17494                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17495                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17496                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17497                         tp->bufmgr_config.mbuf_high_water =
17498                                 DEFAULT_MB_HIGH_WATER_5906;
17499                 }
17500
17501                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17502                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17503                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17504                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17505                 tp->bufmgr_config.mbuf_high_water_jumbo =
17506                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17507         } else {
17508                 tp->bufmgr_config.mbuf_read_dma_low_water =
17509                         DEFAULT_MB_RDMA_LOW_WATER;
17510                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17511                         DEFAULT_MB_MACRX_LOW_WATER;
17512                 tp->bufmgr_config.mbuf_high_water =
17513                         DEFAULT_MB_HIGH_WATER;
17514
17515                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17516                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17517                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17518                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17519                 tp->bufmgr_config.mbuf_high_water_jumbo =
17520                         DEFAULT_MB_HIGH_WATER_JUMBO;
17521         }
17522
17523         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17524         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17525 }
17526
17527 static char *tg3_phy_string(struct tg3 *tp)
17528 {
17529         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17530         case TG3_PHY_ID_BCM5400:        return "5400";
17531         case TG3_PHY_ID_BCM5401:        return "5401";
17532         case TG3_PHY_ID_BCM5411:        return "5411";
17533         case TG3_PHY_ID_BCM5701:        return "5701";
17534         case TG3_PHY_ID_BCM5703:        return "5703";
17535         case TG3_PHY_ID_BCM5704:        return "5704";
17536         case TG3_PHY_ID_BCM5705:        return "5705";
17537         case TG3_PHY_ID_BCM5750:        return "5750";
17538         case TG3_PHY_ID_BCM5752:        return "5752";
17539         case TG3_PHY_ID_BCM5714:        return "5714";
17540         case TG3_PHY_ID_BCM5780:        return "5780";
17541         case TG3_PHY_ID_BCM5755:        return "5755";
17542         case TG3_PHY_ID_BCM5787:        return "5787";
17543         case TG3_PHY_ID_BCM5784:        return "5784";
17544         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17545         case TG3_PHY_ID_BCM5906:        return "5906";
17546         case TG3_PHY_ID_BCM5761:        return "5761";
17547         case TG3_PHY_ID_BCM5718C:       return "5718C";
17548         case TG3_PHY_ID_BCM5718S:       return "5718S";
17549         case TG3_PHY_ID_BCM57765:       return "57765";
17550         case TG3_PHY_ID_BCM5719C:       return "5719C";
17551         case TG3_PHY_ID_BCM5720C:       return "5720C";
17552         case TG3_PHY_ID_BCM5762:        return "5762C";
17553         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17554         case 0:                 return "serdes";
17555         default:                return "unknown";
17556         }
17557 }
17558
17559 static char *tg3_bus_string(struct tg3 *tp, char *str)
17560 {
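        /* The caller supplies the buffer; tg3_init_one() passes a char[40]. */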
17561         if (tg3_flag(tp, PCI_EXPRESS)) {
17562                 strcpy(str, "PCI Express");
17563                 return str;
17564         } else if (tg3_flag(tp, PCIX_MODE)) {
17565                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17566
17567                 strcpy(str, "PCIX:");
17568
17569                 if ((clock_ctrl == 7) ||
17570                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17571                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17572                         strcat(str, "133MHz");
17573                 else if (clock_ctrl == 0)
17574                         strcat(str, "33MHz");
17575                 else if (clock_ctrl == 2)
17576                         strcat(str, "50MHz");
17577                 else if (clock_ctrl == 4)
17578                         strcat(str, "66MHz");
17579                 else if (clock_ctrl == 6)
17580                         strcat(str, "100MHz");
17581         } else {
17582                 strcpy(str, "PCI:");
17583                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17584                         strcat(str, "66MHz");
17585                 else
17586                         strcat(str, "33MHz");
17587         }
17588         if (tg3_flag(tp, PCI_32BIT))
17589                 strcat(str, ":32-bit");
17590         else
17591                 strcat(str, ":64-bit");
17592         return str;
17593 }
17594
17595 static void tg3_init_coal(struct tg3 *tp)
17596 {
17597         struct ethtool_coalesce *ec = &tp->coal;
17598
17599         memset(ec, 0, sizeof(*ec));
17600         ec->cmd = ETHTOOL_GCOALESCE;
17601         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17602         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17603         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17604         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17605         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17606         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17607         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17608         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17609         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17610
17611         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17612                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17613                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17614                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17615                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17616                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17617         }
17618
17619         if (tg3_flag(tp, 5705_PLUS)) {
17620                 ec->rx_coalesce_usecs_irq = 0;
17621                 ec->tx_coalesce_usecs_irq = 0;
17622                 ec->stats_block_coalesce_usecs = 0;
17623         }
17624 }
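/*
 * These defaults seed tp->coal, the values the driver's ethtool
 * coalescing interface (e.g. "ethtool -c <dev>") reports until the
 * user reconfigures them.
 */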
17625
17626 static int tg3_init_one(struct pci_dev *pdev,
17627                                   const struct pci_device_id *ent)
17628 {
17629         struct net_device *dev;
17630         struct tg3 *tp;
17631         int i, err;
17632         u32 sndmbx, rcvmbx, intmbx;
17633         char str[40];
17634         u64 dma_mask, persist_dma_mask;
17635         netdev_features_t features = 0;
17636         u8 addr[ETH_ALEN] __aligned(2);
17637
17638         err = pci_enable_device(pdev);
17639         if (err) {
17640                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17641                 return err;
17642         }
17643
17644         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17645         if (err) {
17646                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17647                 goto err_out_disable_pdev;
17648         }
17649
17650         pci_set_master(pdev);
17651
17652         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17653         if (!dev) {
17654                 err = -ENOMEM;
17655                 goto err_out_free_res;
17656         }
17657
17658         SET_NETDEV_DEV(dev, &pdev->dev);
17659
17660         tp = netdev_priv(dev);
17661         tp->pdev = pdev;
17662         tp->dev = dev;
17663         tp->rx_mode = TG3_DEF_RX_MODE;
17664         tp->tx_mode = TG3_DEF_TX_MODE;
17665         tp->irq_sync = 1;
17666         tp->pcierr_recovery = false;
17667
17668         if (tg3_debug > 0)
17669                 tp->msg_enable = tg3_debug;
17670         else
17671                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17672
17673         if (pdev_is_ssb_gige_core(pdev)) {
17674                 tg3_flag_set(tp, IS_SSB_CORE);
17675                 if (ssb_gige_must_flush_posted_writes(pdev))
17676                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17677                 if (ssb_gige_one_dma_at_once(pdev))
17678                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17679                 if (ssb_gige_have_roboswitch(pdev)) {
17680                         tg3_flag_set(tp, USE_PHYLIB);
17681                         tg3_flag_set(tp, ROBOSWITCH);
17682                 }
17683                 if (ssb_gige_is_rgmii(pdev))
17684                         tg3_flag_set(tp, RGMII_MODE);
17685         }
17686
17687         /* The word/byte swap controls here control register access byte
17688          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17689          * setting below.
17690          */
17691         tp->misc_host_ctrl =
17692                 MISC_HOST_CTRL_MASK_PCI_INT |
17693                 MISC_HOST_CTRL_WORD_SWAP |
17694                 MISC_HOST_CTRL_INDIR_ACCESS |
17695                 MISC_HOST_CTRL_PCISTATE_RW;
17696
17697         /* The NONFRM (non-frame) byte/word swap controls take effect
17698          * on descriptor entries, i.e. anything which isn't packet data.
17699          *
17700          * The StrongARM chips on the board (one for tx, one for rx)
17701          * are running in big-endian mode.
17702          */
17703         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17704                         GRC_MODE_WSWAP_NONFRM_DATA);
17705 #ifdef __BIG_ENDIAN
17706         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17707 #endif
17708         spin_lock_init(&tp->lock);
17709         spin_lock_init(&tp->indirect_lock);
17710         INIT_WORK(&tp->reset_task, tg3_reset_task);
17711
17712         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17713         if (!tp->regs) {
17714                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17715                 err = -ENOMEM;
17716                 goto err_out_free_dev;
17717         }
17718
17719         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17720             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17721             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17722             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17723             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17724             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17725             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17726             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17727             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17728             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17729             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17730             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17731             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17732             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17733             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17734                 tg3_flag_set(tp, ENABLE_APE);
17735                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17736                 if (!tp->aperegs) {
17737                         dev_err(&pdev->dev,
17738                                 "Cannot map APE registers, aborting\n");
17739                         err = -ENOMEM;
17740                         goto err_out_iounmap;
17741                 }
17742         }
17743
17744         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17745         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17746
17747         dev->ethtool_ops = &tg3_ethtool_ops;
17748         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17749         dev->netdev_ops = &tg3_netdev_ops;
17750         dev->irq = pdev->irq;
17751
17752         err = tg3_get_invariants(tp, ent);
17753         if (err) {
17754                 dev_err(&pdev->dev,
17755                         "Problem fetching invariants of chip, aborting\n");
17756                 goto err_out_apeunmap;
17757         }
17758
17759         /* The EPB bridge inside 5714, 5715, and 5780 and any
17760          * device behind the EPB cannot support DMA addresses wider than 40 bits.
17761          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17762          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17763          * do DMA address check in __tg3_start_xmit().
17764          */
17765         if (tg3_flag(tp, IS_5788))
17766                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17767         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17768                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17769 #ifdef CONFIG_HIGHMEM
17770                 dma_mask = DMA_BIT_MASK(64);
17771 #endif
17772         } else
17773                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17774
17775         /* Configure DMA attributes. */
17776         if (dma_mask > DMA_BIT_MASK(32)) {
17777                 err = dma_set_mask(&pdev->dev, dma_mask);
17778                 if (!err) {
17779                         features |= NETIF_F_HIGHDMA;
17780                         err = dma_set_coherent_mask(&pdev->dev,
17781                                                     persist_dma_mask);
17782                         if (err < 0) {
17783                                 dev_err(&pdev->dev,
17784                                         "Unable to obtain 64 bit DMA for consistent allocations\n");
17785                                 goto err_out_apeunmap;
17786                         }
17787                 }
17788         }
17789         if (err || dma_mask == DMA_BIT_MASK(32)) {
17790                 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17791                 if (err) {
17792                         dev_err(&pdev->dev,
17793                                 "No usable DMA configuration, aborting\n");
17794                         goto err_out_apeunmap;
17795                 }
17796         }
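        /*
         * Note: the streaming and coherent masks are set separately
         * rather than with dma_set_mask_and_coherent() because they can
         * legitimately differ here: with the 40-bit DMA bug on a highmem
         * kernel the streaming mask is 64-bit while coherent allocations
         * stay within 40 bits.
         */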
17797
17798         tg3_init_bufmgr_config(tp);
17799
17800         /* 5700 B0 chips do not support checksumming correctly due
17801          * to hardware bugs.
17802          */
17803         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17804                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17805
17806                 if (tg3_flag(tp, 5755_PLUS))
17807                         features |= NETIF_F_IPV6_CSUM;
17808         }
17809
17810         /* TSO is on by default on chips that support hardware TSO.
17811          * Firmware TSO on older chips gives lower performance, so it
17812          * is off by default, but can be enabled using ethtool.
17813          */
17814         if ((tg3_flag(tp, HW_TSO_1) ||
17815              tg3_flag(tp, HW_TSO_2) ||
17816              tg3_flag(tp, HW_TSO_3)) &&
17817             (features & NETIF_F_IP_CSUM))
17818                 features |= NETIF_F_TSO;
17819         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17820                 if (features & NETIF_F_IPV6_CSUM)
17821                         features |= NETIF_F_TSO6;
17822                 if (tg3_flag(tp, HW_TSO_3) ||
17823                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17824                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17825                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17826                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17827                     tg3_asic_rev(tp) == ASIC_REV_57780)
17828                         features |= NETIF_F_TSO_ECN;
17829         }
17830
17831         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17832                          NETIF_F_HW_VLAN_CTAG_RX;
17833         dev->vlan_features |= features;
17834
17835         /*
17836          * Add loopback capability only for a subset of devices that support
17837          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17838          * loopback for the remaining devices.
17839          */
17840         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17841             !tg3_flag(tp, CPMU_PRESENT))
17842                 /* Add the loopback capability */
17843                 features |= NETIF_F_LOOPBACK;
17844
17845         dev->hw_features |= features;
17846         dev->priv_flags |= IFF_UNICAST_FLT;
17847
17848         /* MTU range: 60 - 9000 or 1500, depending on hardware */
17849         dev->min_mtu = TG3_MIN_MTU;
17850         dev->max_mtu = TG3_MAX_MTU(tp);
17851
17852         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17853             !tg3_flag(tp, TSO_CAPABLE) &&
17854             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17855                 tg3_flag_set(tp, MAX_RXPEND_64);
17856                 tp->rx_pending = 63;
17857         }
17858
17859         err = tg3_get_device_address(tp, addr);
17860         if (err) {
17861                 dev_err(&pdev->dev,
17862                         "Could not obtain valid ethernet address, aborting\n");
17863                 goto err_out_apeunmap;
17864         }
17865         eth_hw_addr_set(dev, addr);
17866
17867         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17868         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17869         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
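        /* Hand out per-vector interrupt, receive-return and send-producer
         * mailbox addresses; interrupt mailboxes sit 8 bytes apart.
         */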
17870         for (i = 0; i < tp->irq_max; i++) {
17871                 struct tg3_napi *tnapi = &tp->napi[i];
17872
17873                 tnapi->tp = tp;
17874                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17875
17876                 tnapi->int_mbox = intmbx;
17877                 intmbx += 0x8;
17878
17879                 tnapi->consmbox = rcvmbx;
17880                 tnapi->prodmbox = sndmbx;
17881
17882                 if (i)
17883                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17884                 else
17885                         tnapi->coal_now = HOSTCC_MODE_NOW;
17886
17887                 if (!tg3_flag(tp, SUPPORT_MSIX))
17888                         break;
17889
17890                 /*
17891                  * If we support MSIX, we'll be using RSS.  If we're using
17892                  * RSS, the first vector only handles link interrupts and the
17893                  * remaining vectors handle rx and tx interrupts.  Reuse the
17894                  * mailbox values for the next iteration.  The values we set up
17895                  * above are still useful for the single vectored mode.
17896                  */
17897                 if (!i)
17898                         continue;
17899
17900                 rcvmbx += 0x8;
17901
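                /* Send producer mailboxes appear to be packed two per
                 * 64-bit register: step back to the other word of the
                 * current register, then forward into the next one.
                 */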
17902                 if (sndmbx & 0x4)
17903                         sndmbx -= 0x4;
17904                 else
17905                         sndmbx += 0xc;
17906         }
17907
17908         /*
17909          * Reset the chip in case a UNDI or EFI driver did not shut down;
17910          * the DMA self test will enable the WDMAC and we'll see (spurious)
17911          * pending DMA on the PCI bus at that point.
17912          */
17913         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17914             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17915                 tg3_full_lock(tp, 0);
17916                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17917                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17918                 tg3_full_unlock(tp);
17919         }
17920
17921         err = tg3_test_dma(tp);
17922         if (err) {
17923                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17924                 goto err_out_apeunmap;
17925         }
17926
17927         tg3_init_coal(tp);
17928
17929         pci_set_drvdata(pdev, dev);
17930
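        /* Of the supported chips, only the 5719, 5720 and 5762 expose
         * PTP hardware timestamping.
         */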
17931         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17932             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17933             tg3_asic_rev(tp) == ASIC_REV_5762)
17934                 tg3_flag_set(tp, PTP_CAPABLE);
17935
17936         tg3_timer_init(tp);
17937
17938         tg3_carrier_off(tp);
17939
17940         err = register_netdev(dev);
17941         if (err) {
17942                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17943                 goto err_out_apeunmap;
17944         }
17945
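        /* A failed PTP clock registration is not fatal; the driver just
         * runs without exposing a PTP clock.
         */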
17946         if (tg3_flag(tp, PTP_CAPABLE)) {
17947                 tg3_ptp_init(tp);
17948                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17949                                                    &tp->pdev->dev);
17950                 if (IS_ERR(tp->ptp_clock))
17951                         tp->ptp_clock = NULL;
17952         }
17953
17954         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17955                     tp->board_part_number,
17956                     tg3_chip_rev_id(tp),
17957                     tg3_bus_string(tp, str),
17958                     dev->dev_addr);
17959
17960         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17961                 char *ethtype;
17962
17963                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17964                         ethtype = "10/100Base-TX";
17965                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17966                         ethtype = "1000Base-SX";
17967                 else
17968                         ethtype = "10/100/1000Base-T";
17969
17970                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17971                             "(WireSpeed[%d], EEE[%d])\n",
17972                             tg3_phy_string(tp), ethtype,
17973                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17974                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17975         }
17976
17977         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17978                     (dev->features & NETIF_F_RXCSUM) != 0,
17979                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17980                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17981                     tg3_flag(tp, ENABLE_ASF) != 0,
17982                     tg3_flag(tp, TSO_CAPABLE) != 0);
17983         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17984                     tp->dma_rwctrl,
17985                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17986                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17987
17988         pci_save_state(pdev);
17989
17990         return 0;
17991
17992 err_out_apeunmap:
17993         if (tp->aperegs) {
17994                 iounmap(tp->aperegs);
17995                 tp->aperegs = NULL;
17996         }
17997
17998 err_out_iounmap:
17999         if (tp->regs) {
18000                 iounmap(tp->regs);
18001                 tp->regs = NULL;
18002         }
18003
18004 err_out_free_dev:
18005         free_netdev(dev);
18006
18007 err_out_free_res:
18008         pci_release_regions(pdev);
18009
18010 err_out_disable_pdev:
18011         if (pci_is_enabled(pdev))
18012                 pci_disable_device(pdev);
18013         return err;
18014 }
18015
18016 static void tg3_remove_one(struct pci_dev *pdev)
18017 {
18018         struct net_device *dev = pci_get_drvdata(pdev);
18019
18020         if (dev) {
18021                 struct tg3 *tp = netdev_priv(dev);
18022
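                /* Tear down roughly in reverse order of probe: PTP,
                 * firmware, deferred work, PHY/MDIO, then the netdev
                 * and PCI resources.
                 */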
18023                 tg3_ptp_fini(tp);
18024
18025                 release_firmware(tp->fw);
18026
18027                 tg3_reset_task_cancel(tp);
18028
18029                 if (tg3_flag(tp, USE_PHYLIB)) {
18030                         tg3_phy_fini(tp);
18031                         tg3_mdio_fini(tp);
18032                 }
18033
18034                 unregister_netdev(dev);
18035                 if (tp->aperegs) {
18036                         iounmap(tp->aperegs);
18037                         tp->aperegs = NULL;
18038                 }
18039                 if (tp->regs) {
18040                         iounmap(tp->regs);
18041                         tp->regs = NULL;
18042                 }
18043                 free_netdev(dev);
18044                 pci_release_regions(pdev);
18045                 pci_disable_device(pdev);
18046         }
18047 }
18048
18049 #ifdef CONFIG_PM_SLEEP
18050 static int tg3_suspend(struct device *device)
18051 {
18052         struct net_device *dev = dev_get_drvdata(device);
18053         struct tg3 *tp = netdev_priv(dev);
18054         int err = 0;
18055
18056         rtnl_lock();
18057
18058         if (!netif_running(dev))
18059                 goto unlock;
18060
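        /* Quiesce everything before cutting power: cancel deferred work,
         * stop the PHY, queues and timer, then halt the chip with
         * interrupts disabled.
         */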
18061         tg3_reset_task_cancel(tp);
18062         tg3_phy_stop(tp);
18063         tg3_netif_stop(tp);
18064
18065         tg3_timer_stop(tp);
18066
18067         tg3_full_lock(tp, 1);
18068         tg3_disable_ints(tp);
18069         tg3_full_unlock(tp);
18070
18071         netif_device_detach(dev);
18072
18073         tg3_full_lock(tp, 0);
18074         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18075         tg3_flag_clear(tp, INIT_COMPLETE);
18076         tg3_full_unlock(tp);
18077
18078         err = tg3_power_down_prepare(tp);
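        /* If preparing for power-down failed, bring the hardware and
         * network interface back up so the device remains usable.
         */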
18079         if (err) {
18080                 int err2;
18081
18082                 tg3_full_lock(tp, 0);
18083
18084                 tg3_flag_set(tp, INIT_COMPLETE);
18085                 err2 = tg3_restart_hw(tp, true);
18086                 if (err2)
18087                         goto out;
18088
18089                 tg3_timer_start(tp);
18090
18091                 netif_device_attach(dev);
18092                 tg3_netif_start(tp);
18093
18094 out:
18095                 tg3_full_unlock(tp);
18096
18097                 if (!err2)
18098                         tg3_phy_start(tp);
18099         }
18100
18101 unlock:
18102         rtnl_unlock();
18103         return err;
18104 }
18105
18106 static int tg3_resume(struct device *device)
18107 {
18108         struct net_device *dev = dev_get_drvdata(device);
18109         struct tg3 *tp = netdev_priv(dev);
18110         int err = 0;
18111
18112         rtnl_lock();
18113
18114         if (!netif_running(dev))
18115                 goto unlock;
18116
18117         netif_device_attach(dev);
18118
18119         tg3_full_lock(tp, 0);
18120
18121         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18122
18123         tg3_flag_set(tp, INIT_COMPLETE);
18124         err = tg3_restart_hw(tp,
18125                              !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18126         if (err)
18127                 goto out;
18128
18129         tg3_timer_start(tp);
18130
18131         tg3_netif_start(tp);
18132
18133 out:
18134         tg3_full_unlock(tp);
18135
18136         if (!err)
18137                 tg3_phy_start(tp);
18138
18139 unlock:
18140         rtnl_unlock();
18141         return err;
18142 }
18143 #endif /* CONFIG_PM_SLEEP */
18144
18145 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18146
18147 static void tg3_shutdown(struct pci_dev *pdev)
18148 {
18149         struct net_device *dev = pci_get_drvdata(pdev);
18150         struct tg3 *tp = netdev_priv(dev);
18151
18152         tg3_reset_task_cancel(tp);
18153
18154         rtnl_lock();
18155
18156         netif_device_detach(dev);
18157
18158         if (netif_running(dev))
18159                 dev_close(dev);
18160
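        /* Only drop the NIC into its low-power state on a real power-off;
         * for a reboot the device is left powered.
         */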
18161         if (system_state == SYSTEM_POWER_OFF)
18162                 tg3_power_down(tp);
18163
18164         rtnl_unlock();
18165
18166         pci_disable_device(pdev);
18167 }
18168
18169 /**
18170  * tg3_io_error_detected - called when a PCI error is detected
18171  * @pdev: Pointer to PCI device
18172  * @state: The current PCI connection state
18173  *
18174  * This function is called after a PCI bus error affecting
18175  * this device has been detected.
18176  */
18177 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18178                                               pci_channel_state_t state)
18179 {
18180         struct net_device *netdev = pci_get_drvdata(pdev);
18181         struct tg3 *tp = netdev_priv(netdev);
18182         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18183
18184         netdev_info(netdev, "PCI I/O error detected\n");
18185
18186         /* We want to make sure that the reset task doesn't run */
18187         tg3_reset_task_cancel(tp);
18188
18189         rtnl_lock();
18190
18191         /* Could be a second call, or maybe we don't have a netdev yet */
18192         if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18193                 goto done;
18194
18195         /* We needn't recover from a permanent error */
18196         if (state == pci_channel_io_frozen)
18197                 tp->pcierr_recovery = true;
18198
18199         tg3_phy_stop(tp);
18200
18201         tg3_netif_stop(tp);
18202
18203         tg3_timer_stop(tp);
18204
18205         netif_device_detach(netdev);
18206
18207         /* Clean up software state, even if MMIO is blocked */
18208         tg3_full_lock(tp, 0);
18209         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18210         tg3_full_unlock(tp);
18211
18212 done:
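        /* On a permanent failure take the interface down for good;
         * otherwise disable the device and report that a slot reset
         * is needed.
         */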
18213         if (state == pci_channel_io_perm_failure) {
18214                 if (netdev) {
18215                         tg3_napi_enable(tp);
18216                         dev_close(netdev);
18217                 }
18218                 err = PCI_ERS_RESULT_DISCONNECT;
18219         } else {
18220                 pci_disable_device(pdev);
18221         }
18222
18223         rtnl_unlock();
18224
18225         return err;
18226 }
18227
18228 /**
18229  * tg3_io_slot_reset - called after the PCI bus has been reset.
18230  * @pdev: Pointer to PCI device
18231  *
18232  * Restart the card from scratch, as if from a cold-boot.
18233  * At this point, the card has experienced a hard reset,
18234  * followed by fixups by BIOS, and has its config space
18235  * set up identically to what it was at cold boot.
18236  */
18237 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18238 {
18239         struct net_device *netdev = pci_get_drvdata(pdev);
18240         struct tg3 *tp = netdev_priv(netdev);
18241         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18242         int err;
18243
18244         rtnl_lock();
18245
18246         if (pci_enable_device(pdev)) {
18247                 dev_err(&pdev->dev,
18248                         "Cannot re-enable PCI device after reset.\n");
18249                 goto done;
18250         }
18251
18252         pci_set_master(pdev);
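        /* Restore the config space saved at probe time, then save it
         * again so any later reset starts from this known-good state.
         */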
18253         pci_restore_state(pdev);
18254         pci_save_state(pdev);
18255
18256         if (!netdev || !netif_running(netdev)) {
18257                 rc = PCI_ERS_RESULT_RECOVERED;
18258                 goto done;
18259         }
18260
18261         err = tg3_power_up(tp);
18262         if (err)
18263                 goto done;
18264
18265         rc = PCI_ERS_RESULT_RECOVERED;
18266
18267 done:
18268         if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18269                 tg3_napi_enable(tp);
18270                 dev_close(netdev);
18271         }
18272         rtnl_unlock();
18273
18274         return rc;
18275 }
18276
18277 /**
18278  * tg3_io_resume - called when traffic can start flowing again.
18279  * @pdev: Pointer to PCI device
18280  *
18281  * This callback is called when the error recovery driver tells
18282  * us that it's OK to resume normal operation.
18283  */
18284 static void tg3_io_resume(struct pci_dev *pdev)
18285 {
18286         struct net_device *netdev = pci_get_drvdata(pdev);
18287         struct tg3 *tp = netdev_priv(netdev);
18288         int err;
18289
18290         rtnl_lock();
18291
18292         if (!netdev || !netif_running(netdev))
18293                 goto done;
18294
18295         tg3_full_lock(tp, 0);
18296         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18297         tg3_flag_set(tp, INIT_COMPLETE);
18298         err = tg3_restart_hw(tp, true);
18299         if (err) {
18300                 tg3_full_unlock(tp);
18301                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18302                 goto done;
18303         }
18304
18305         netif_device_attach(netdev);
18306
18307         tg3_timer_start(tp);
18308
18309         tg3_netif_start(tp);
18310
18311         tg3_full_unlock(tp);
18312
18313         tg3_phy_start(tp);
18314
18315 done:
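        /* Recovery is finished (or was never started); clear the flag so
         * normal operation and future error handling can proceed.
         */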
18316         tp->pcierr_recovery = false;
18317         rtnl_unlock();
18318 }
18319
18320 static const struct pci_error_handlers tg3_err_handler = {
18321         .error_detected = tg3_io_error_detected,
18322         .slot_reset     = tg3_io_slot_reset,
18323         .resume         = tg3_io_resume
18324 };
18325
18326 static struct pci_driver tg3_driver = {
18327         .name           = DRV_MODULE_NAME,
18328         .id_table       = tg3_pci_tbl,
18329         .probe          = tg3_init_one,
18330         .remove         = tg3_remove_one,
18331         .err_handler    = &tg3_err_handler,
18332         .driver.pm      = &tg3_pm_ops,
18333         .shutdown       = tg3_shutdown,
18334 };
18335
18336 module_pci_driver(tg3_driver);