/* Imported from git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
 * (linux-2.6-microblaze.git): drivers/net/ethernet/broadcom/tg3.c
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2013 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
50
51 #include <net/checksum.h>
52 #include <net/ip.h>
53
54 #include <linux/io.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
57
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
60
61 #ifdef CONFIG_SPARC
62 #include <asm/idprom.h>
63 #include <asm/prom.h>
64 #endif
65
66 #define BAR_0   0
67 #define BAR_2   2
68
69 #include "tg3.h"
70
71 /* Functions & macros to verify TG3_FLAGS types */
72
73 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75         return test_bit(flag, bits);
76 }
77
78 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80         set_bit(flag, bits);
81 }
82
83 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
84 {
85         clear_bit(flag, bits);
86 }
87
88 #define tg3_flag(tp, flag)                              \
89         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag)                          \
91         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag)                        \
93         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
94
95 #define DRV_MODULE_NAME         "tg3"
96 #define TG3_MAJ_NUM                     3
97 #define TG3_MIN_NUM                     132
98 #define DRV_MODULE_VERSION      \
99         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE      "May 21, 2013"
101
102 #define RESET_KIND_SHUTDOWN     0
103 #define RESET_KIND_INIT         1
104 #define RESET_KIND_SUSPEND      2
105
106 #define TG3_DEF_RX_MODE         0
107 #define TG3_DEF_TX_MODE         0
108 #define TG3_DEF_MSG_ENABLE        \
109         (NETIF_MSG_DRV          | \
110          NETIF_MSG_PROBE        | \
111          NETIF_MSG_LINK         | \
112          NETIF_MSG_TIMER        | \
113          NETIF_MSG_IFDOWN       | \
114          NETIF_MSG_IFUP         | \
115          NETIF_MSG_RX_ERR       | \
116          NETIF_MSG_TX_ERR)
117
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
119
120 /* length of time before we decide the hardware is borked,
121  * and dev->tx_timeout() should be called to fix the problem
122  */
123
124 #define TG3_TX_TIMEOUT                  (5 * HZ)
125
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU                     60
128 #define TG3_MAX_MTU(tp) \
129         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
130
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132  * You can't change the ring sizes, but you can change where you place
133  * them in the NIC onboard memory.
134  */
135 #define TG3_RX_STD_RING_SIZE(tp) \
136         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING         200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
143
144 /* Do not place this n-ring entries value into the tp struct itself,
145  * we really want to expose these constants to GCC so that modulo et
146  * al.  operations are done with shifts and masks instead of with
147  * hw multiply/modulo instructions.  Another solution would be to
148  * replace things like '% foo' with '& (foo - 1)'.
149  */
150
151 #define TG3_TX_RING_SIZE                512
152 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
153
154 #define TG3_RX_STD_RING_BYTES(tp) \
155         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
161                                  TG3_TX_RING_SIZE)
162 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
163
164 #define TG3_DMA_BYTE_ENAB               64
165
166 #define TG3_RX_STD_DMA_SZ               1536
167 #define TG3_RX_JMB_DMA_SZ               9046
168
169 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
170
171 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
173
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
176
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
179
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181  * that are at least dword aligned when used in PCIX mode.  The driver
182  * works around this bug by double copying the packet.  This workaround
183  * is built into the normal double copy length check for efficiency.
184  *
185  * However, the double copy is only necessary on those architectures
186  * where unaligned memory accesses are inefficient.  For those architectures
187  * where unaligned memory accesses incur little penalty, we can reintegrate
188  * the 5701 in the normal rx path.  Doing so saves a device structure
189  * dereference by hardcoding the double copy threshold in place.
190  */
191 #define TG3_RX_COPY_THRESHOLD           256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
194 #else
195         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
196 #endif
197
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
200 #else
201 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
202 #endif
203
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K            2048
207 #define TG3_TX_BD_DMA_MAX_4K            4096
208
209 #define TG3_RAW_IP_ALIGN 2
210
211 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
212 #define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
213
214 #define FIRMWARE_TG3            "tigon/tg3.bin"
215 #define FIRMWARE_TG357766       "tigon/tg357766.bin"
216 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
217 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
218
219 static char version[] =
220         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
221
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
229
230 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
233
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100   0x0002
236
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258                         TG3_DRV_DATA_FLAG_5705_10_100},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261                         TG3_DRV_DATA_FLAG_5705_10_100},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265                         TG3_DRV_DATA_FLAG_5705_10_100},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287                         PCI_VENDOR_ID_LENOVO,
288                         TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
348         {}
349 };
350
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
352
/* String keys reported by "ethtool -S", one per statistics counter.
 * NOTE(review): the order presumably must match the counter layout used
 * by the stats-gathering code elsewhere in this file — do not reorder
 * without checking that code.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
435
436 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST          0
438 #define TG3_LINK_TEST           1
439 #define TG3_REGISTER_TEST       2
440 #define TG3_MEMORY_TEST         3
441 #define TG3_MAC_LOOPB_TEST      4
442 #define TG3_PHY_LOOPB_TEST      5
443 #define TG3_EXT_LOOPB_TEST      6
444 #define TG3_INTERRUPT_TEST      7
445
446
/* Names of the ethtool self-tests, indexed by the TG3_*_TEST defines
 * above (designated initializers keep index and define in sync).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};
459
460 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
461
462
463 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
464 {
465         writel(val, tp->regs + off);
466 }
467
468 static u32 tg3_read32(struct tg3 *tp, u32 off)
469 {
470         return readl(tp->regs + off);
471 }
472
473 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
474 {
475         writel(val, tp->aperegs + off);
476 }
477
478 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
479 {
480         return readl(tp->aperegs + off);
481 }
482
483 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
484 {
485         unsigned long flags;
486
487         spin_lock_irqsave(&tp->indirect_lock, flags);
488         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
489         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
490         spin_unlock_irqrestore(&tp->indirect_lock, flags);
491 }
492
493 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
494 {
495         writel(val, tp->regs + off);
496         readl(tp->regs + off);
497 }
498
499 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
500 {
501         unsigned long flags;
502         u32 val;
503
504         spin_lock_irqsave(&tp->indirect_lock, flags);
505         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
506         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
507         spin_unlock_irqrestore(&tp->indirect_lock, flags);
508         return val;
509 }
510
/* Write @val to mailbox @off using indirect PCI config cycles.
 *
 * The RX return ring consumer index and the RX std producer index have
 * dedicated config-space aliases and bypass the indirect window; all
 * other mailboxes are reached at @off + 0x5600 through the window,
 * serialized by tp->indirect_lock.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
540
541 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
542 {
543         unsigned long flags;
544         u32 val;
545
546         spin_lock_irqsave(&tp->indirect_lock, flags);
547         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
548         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
549         spin_unlock_irqrestore(&tp->indirect_lock, flags);
550         return val;
551 }
552
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554  * where it is unsafe to read back the register without some delay.
555  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
556  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
557  */
/* Write @val to register @off and guarantee the write reached the chip,
 * observing a settle time of @usec_wait microseconds (see the comment
 * above about GRC_LOCAL_CTRL / TG3PCI_CLOCK_CTRL).  Chips with the PCIX
 * target hwbug or the ICH workaround use the chip-specific non-posted
 * write method instead of a write + read-back flush.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
576
/* Write @val to mailbox @off via the chip-specific mailbox method, then
 * read it back to flush the posted write.  The read-back is skipped on
 * chips with the MBOX_WRITE_REORDER or ICH_WORKAROUND quirks unless
 * FLUSH_POSTED_WRITES forces it.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
585
/* Kick a TX mailbox with @val.  Chips with the TXD mailbox hardware bug
 * need the doorbell written twice; chips that may reorder mailbox
 * writes (or that must flush posted writes) get a read-back as well.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
596
597 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
598 {
599         return readl(tp->regs + off + GRCMBOX_BASE);
600 }
601
602 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
603 {
604         writel(val, tp->regs + off + GRCMBOX_BASE);
605 }
606
607 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
608 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
609 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
610 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
611 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
612
613 #define tw32(reg, val)                  tp->write32(tp, reg, val)
614 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
615 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
616 #define tr32(reg)                       tp->read32(tp, reg)
617
/* Write @val into NIC SRAM at offset @off through the memory window.
 *
 * Depending on SRAM_USE_CONFIG, the window is driven either through PCI
 * config space or through the memory-mapped window registers; either
 * path is serialized by tp->indirect_lock and the window base is reset
 * to zero afterwards.  Writes to the 5906's stats-block SRAM range are
 * silently dropped (mirrors the check in tg3_read_mem).
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
642
/* Read NIC SRAM at offset @off into *@val through the memory window.
 *
 * Counterpart of tg3_write_mem(): same two access paths, same locking,
 * same window-base reset.  Reads from the 5906's stats-block SRAM range
 * return 0 instead of touching the hardware.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
669
670 static void tg3_ape_lock_init(struct tg3 *tp)
671 {
672         int i;
673         u32 regbase, bit;
674
675         if (tg3_asic_rev(tp) == ASIC_REV_5761)
676                 regbase = TG3_APE_LOCK_GRANT;
677         else
678                 regbase = TG3_APE_PER_LOCK_GRANT;
679
680         /* Make sure the driver hasn't any stale locks. */
681         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
682                 switch (i) {
683                 case TG3_APE_LOCK_PHY0:
684                 case TG3_APE_LOCK_PHY1:
685                 case TG3_APE_LOCK_PHY2:
686                 case TG3_APE_LOCK_PHY3:
687                         bit = APE_LOCK_GRANT_DRIVER;
688                         break;
689                 default:
690                         if (!tp->pci_fn)
691                                 bit = APE_LOCK_GRANT_DRIVER;
692                         else
693                                 bit = 1 << tp->pci_fn;
694                 }
695                 tg3_ape_write32(tp, regbase + 4 * i, bit);
696         }
697
698 }
699
/* Acquire APE lock @locknum on behalf of this driver instance.
 *
 * Returns 0 on success (or when APE is disabled, or the lock does not
 * exist on this chip), -EINVAL for an unknown lock number, or -EBUSY if
 * the grant did not appear within ~1 ms (the request is revoked).
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO APE lock. */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through - GPIO uses the same per-function request
		 * bit as GRC/MEM.
		 */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	/* 5761 uses the single request/grant register blocks; later
	 * chips use the per-function blocks.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
758
/* Release APE hardware mutex @locknum previously taken by tg3_ape_lock().
 * Silently does nothing when the APE is not enabled or @locknum is not a
 * lock this function recognizes.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock, so there is nothing to release. */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Must mirror the bit selection used in tg3_ape_lock(). */
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	/* 5761 uses the older single grant register block. */
	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	/* Writing our bit back to the grant register drops the lock. */
	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
794
/* Take the APE MEM lock once the APE has no event pending, polling for up
 * to @timeout_us microseconds in 10 usec steps.
 *
 * On success (0) the caller holds TG3_APE_LOCK_MEM and may post a new
 * event.  Returns -EBUSY if the lock cannot be taken or the APE never
 * clears its pending event in time; the lock is NOT held in that case.
 */
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		/* Still busy: release the lock and retry after a delay. */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
815
816 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
817 {
818         u32 i, apedata;
819
820         for (i = 0; i < timeout_us / 10; i++) {
821                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
822
823                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
824                         break;
825
826                 udelay(10);
827         }
828
829         return i == timeout_us / 10;
830 }
831
/* Read @len bytes from APE scratchpad offset @base_off into @data via the
 * APE message-buffer handshake.
 *
 * Returns 0 on success (also when the device lacks NCSI support, in which
 * case nothing is read), -ENODEV if the APE segment signature is wrong,
 * -EAGAIN if the APE firmware is not ready or does not service the event
 * in time, or an error from tg3_ape_event_lock().
 *
 * NOTE(review): @len appears to be assumed a multiple of 4 by the final
 * copy loop — confirm with callers.
 */
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	/* Sanity-check that APE firmware is present and ready. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Locate the shared message buffer: two u32 header words
	 * (offset, length) followed by the data area at msgoff.
	 */
	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		/* Post a scratchpad-read request for this chunk. */
		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		/* Release the MEM lock taken by tg3_ape_event_lock() and
		 * ring the APE doorbell.
		 */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		/* Copy the chunk out of the message buffer, word by word. */
		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
895
/* Post driver event @event to the APE firmware and ring its doorbell.
 *
 * Returns 0 on success, -EAGAIN if the APE signature or ready status is
 * wrong, or an error from tg3_ape_event_lock().
 */
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	/* Drop the MEM lock held since tg3_ape_event_lock(), then notify. */
	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
922
/* Inform the APE firmware of a driver state transition.
 *
 * @kind selects the transition: RESET_KIND_INIT announces driver start
 * (host segment signature, init count, driver id, behavior flags),
 * RESET_KIND_SHUTDOWN announces unload/WoL.  Other kinds are ignored.
 * No-op when the APE is not enabled.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		/* Bump the count of driver initializations seen by APE. */
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		/* Advertise WoL state so the APE keeps the link alive
		 * when wake-on-LAN is armed.
		 */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
976
977 static void tg3_disable_ints(struct tg3 *tp)
978 {
979         int i;
980
981         tw32(TG3PCI_MISC_HOST_CTRL,
982              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
983         for (i = 0; i < tp->irq_max; i++)
984                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
985 }
986
/* Unmask chip interrupts and re-arm every interrupt vector.
 *
 * Ordering matters here: irq_sync must be visible as zero before the
 * mailbox writes re-enable interrupts (hence the wmb()), and the final
 * coalescing kick depends on whether the status block already reports
 * an update.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	/* Unmask the PCI interrupt at the host control register. */
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* Writing last_tag re-arms the vector; 1SHOT_MSI parts
		 * need the write issued twice.
		 */
		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
1017
1018 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1019 {
1020         struct tg3 *tp = tnapi->tp;
1021         struct tg3_hw_status *sblk = tnapi->hw_status;
1022         unsigned int work_exists = 0;
1023
1024         /* check for phy events */
1025         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1026                 if (sblk->status & SD_STATUS_LINK_CHG)
1027                         work_exists = 1;
1028         }
1029
1030         /* check for TX work to do */
1031         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1032                 work_exists = 1;
1033
1034         /* check for RX work to do */
1035         if (tnapi->rx_rcb_prod_idx &&
1036             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1037                 work_exists = 1;
1038
1039         return work_exists;
1040 }
1041
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Re-arm the vector; mmiowb() orders the mailbox write against
	 * later MMIO from other CPUs before any lock release.
	 */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
1062
/* Switch the chip's core clock down via TG3PCI_CLOCK_CTRL.
 *
 * No-op on CPMU-equipped or 5780-class devices.  On other parts this
 * preserves the CLKRUN bits plus the low 5 bits, caches the result in
 * tp->pci_clock_ctrl, and steps through the intermediate clock settings
 * the hardware requires before the final write.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Two-step transition: keep the 44MHz core bit set while
		 * enabling ALTCLK, then drop the core bit.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
1095
1096 #define PHY_BUSY_LOOPS  5000
1097
/* Read PHY register @reg at MII address @phy_addr through the MAC's MI
 * (MDIO) interface, storing the 16-bit result in *val.
 *
 * Temporarily disables MI auto-polling if active, holds the PHY APE lock
 * for the duration, and busy-waits up to PHY_BUSY_LOOPS iterations for
 * the transaction to complete.  Returns 0 on success, -EBUSY on timeout
 * (*val is left 0 in that case).
 */
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	/* Build the MI command frame: PHY address, register, read op. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a settle delay to get the data. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if we turned it off above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1151
/* Read PHY register @reg at the device's own PHY address. */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
1156
/* Write @val to PHY register @reg at MII address @phy_addr through the
 * MAC's MI (MDIO) interface.
 *
 * FET-class PHYs silently skip writes to MII_CTRL1000/MII_TG3_AUX_CTRL
 * (returned as success).  Temporarily disables MI auto-polling if active
 * and holds the PHY APE lock for the duration.  Returns 0 on success,
 * -EBUSY on timeout.
 */
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	/* Build the MI command frame: PHY address, register, data, write. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if we turned it off above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1210
/* Write @val to PHY register @reg at the device's own PHY address. */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
1215
1216 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1217 {
1218         int err;
1219
1220         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1221         if (err)
1222                 goto done;
1223
1224         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1225         if (err)
1226                 goto done;
1227
1228         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1229                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1230         if (err)
1231                 goto done;
1232
1233         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1234
1235 done:
1236         return err;
1237 }
1238
1239 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1240 {
1241         int err;
1242
1243         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1244         if (err)
1245                 goto done;
1246
1247         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1248         if (err)
1249                 goto done;
1250
1251         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1252                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1253         if (err)
1254                 goto done;
1255
1256         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1257
1258 done:
1259         return err;
1260 }
1261
1262 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1263 {
1264         int err;
1265
1266         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1267         if (!err)
1268                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1269
1270         return err;
1271 }
1272
1273 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1274 {
1275         int err;
1276
1277         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1278         if (!err)
1279                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1280
1281         return err;
1282 }
1283
1284 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1285 {
1286         int err;
1287
1288         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1289                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1290                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1291         if (!err)
1292                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1293
1294         return err;
1295 }
1296
1297 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1298 {
1299         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1300                 set |= MII_TG3_AUXCTL_MISC_WREN;
1301
1302         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1303 }
1304
1305 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1306 {
1307         u32 val;
1308         int err;
1309
1310         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1311
1312         if (err)
1313                 return err;
1314
1315         if (enable)
1316                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1317         else
1318                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1319
1320         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1321                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1322
1323         return err;
1324 }
1325
/* Reset the PHY by setting BMCR_RESET, then poll (up to 5000 times, 10
 * usec apart) until the PHY clears the bit.  Returns 0 on success or
 * -EBUSY on any PHY access failure or timeout.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	/* limit is -1 only if the loop ran to exhaustion (timeout). */
	if (limit < 0)
		return -EBUSY;

	return 0;
}
1356
/* mii_bus read callback: read PHY register @reg under tp->lock.
 * @mii_id is ignored; the device's own PHY address is used.
 * Returns the register value, or -EIO if the read fails.
 */
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}
1371
/* mii_bus write callback: write @val to PHY register @reg under
 * tp->lock.  @mii_id is ignored; the device's own PHY address is used.
 * Returns 0 on success or -EIO on failure.
 */
static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
1386
/* mii_bus reset callback: nothing to do for this device. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1391
/* Configure the 5785 MAC's PHY interface registers for the attached
 * PHY type (LED modes, RGMII in-band signalling, clock timeouts).
 * Unknown PHY ids are left untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	/* Pick the LED mode matching the attached PHY model. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII interfaces need only the LED modes and clock
	 * timeouts; the RGMII-specific setup below is skipped.
	 */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status enabled needs the full mask set. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Mirror the in-band options into the external RGMII mode reg. */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1472
/* Turn off MI auto-polling so MDIO accesses can proceed, and reapply
 * the 5785 PHY interface configuration if the MDIO bus is up.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1483
/* Determine the PHY address and, when phylib is in use, allocate and
 * register the MDIO bus, then apply per-PHY quirk flags.
 *
 * Returns 0 on success (including the no-phylib / already-inited case),
 * -ENOMEM on allocation failure, -ENODEV if no usable PHY is found, or
 * the mdiobus_register() error.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+ parts map PHY addresses by PCI function; serdes
		 * links live a further 7 addresses up.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	/* Probe only the one PHY address this device uses. */
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Apply per-model interface mode and quirk flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1588
1589 static void tg3_mdio_fini(struct tg3 *tp)
1590 {
1591         if (tg3_flag(tp, MDIOBUS_INITED)) {
1592                 tg3_flag_clear(tp, MDIOBUS_INITED);
1593                 mdiobus_unregister(tp->mdio_bus);
1594                 mdiobus_free(tp->mdio_bus);
1595         }
1596 }
1597
/* tp->lock is held. */
/* Raise the driver-event bit in GRC_RX_CPU_EVENT to signal the firmware,
 * and record when we did so (used by tg3_wait_for_event_ack()).
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
1609
1610 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1611
/* tp->lock is held. */
/* Wait until the firmware has acknowledged (cleared) the previous driver
 * event, bounded by TG3_FW_EVENT_TIMEOUT_USEC measured from when the
 * event was generated.  Returns early if that window has already passed.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Convert remaining microseconds into 8 usec poll iterations. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1638
/* tp->lock is held. */
/* Gather link-related PHY registers into four u32 words for the UMP
 * link report: each word packs one register in its high 16 bits and a
 * second in its low 16 bits (BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000,
 * PHYADDR/0).  A failed read leaves that half zero.
 */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	/* 1000BASE-T registers are not meaningful on MII serdes links. */
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
1673
/* tp->lock is held. */
/* Push the current PHY/link state (BMCR/BMSR, ADVERTISE/LPA, 1000T
 * registers and PHY address) to the management firmware through the
 * NIC SRAM command mailbox.  Only meaningful on 5780-class parts
 * with ASF enabled.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	/* Make sure the firmware has acked any previous event before
	 * overwriting the mailbox.
	 */
	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	/* NOTE(review): length is written as 14 although 16 bytes of
	 * data follow; presumably a firmware-defined value — confirm
	 * against the firmware interface before changing.
	 */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
1695
1696 /* tp->lock is held. */
1697 static void tg3_stop_fw(struct tg3 *tp)
1698 {
1699         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1700                 /* Wait for RX cpu to ACK the previous event. */
1701                 tg3_wait_for_event_ack(tp);
1702
1703                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1704
1705                 tg3_generate_fw_event(tp);
1706
1707                 /* Wait for RX cpu to ACK this event. */
1708                 tg3_wait_for_event_ack(tp);
1709         }
1710 }
1711
1712 /* tp->lock is held. */
1713 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1714 {
1715         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1716                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1717
1718         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1719                 switch (kind) {
1720                 case RESET_KIND_INIT:
1721                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1722                                       DRV_STATE_START);
1723                         break;
1724
1725                 case RESET_KIND_SHUTDOWN:
1726                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1727                                       DRV_STATE_UNLOAD);
1728                         break;
1729
1730                 case RESET_KIND_SUSPEND:
1731                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1732                                       DRV_STATE_SUSPEND);
1733                         break;
1734
1735                 default:
1736                         break;
1737                 }
1738         }
1739 }
1740
1741 /* tp->lock is held. */
1742 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1743 {
1744         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1745                 switch (kind) {
1746                 case RESET_KIND_INIT:
1747                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1748                                       DRV_STATE_START_DONE);
1749                         break;
1750
1751                 case RESET_KIND_SHUTDOWN:
1752                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1753                                       DRV_STATE_UNLOAD_DONE);
1754                         break;
1755
1756                 default:
1757                         break;
1758                 }
1759         }
1760 }
1761
1762 /* tp->lock is held. */
1763 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1764 {
1765         if (tg3_flag(tp, ENABLE_ASF)) {
1766                 switch (kind) {
1767                 case RESET_KIND_INIT:
1768                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1769                                       DRV_STATE_START);
1770                         break;
1771
1772                 case RESET_KIND_SHUTDOWN:
1773                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1774                                       DRV_STATE_UNLOAD);
1775                         break;
1776
1777                 case RESET_KIND_SUSPEND:
1778                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1779                                       DRV_STATE_SUSPEND);
1780                         break;
1781
1782                 default:
1783                         break;
1784                 }
1785         }
1786 }
1787
/* Poll until the bootcode/firmware signals that initialization is
 * complete.  Returns 0 on success or when no firmware is fitted;
 * -ENODEV only when a 5906 VCPU never reports init-done.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	/* Bootcode writes the one's complement of the magic value when
	 * finished; poll up to ~1 second (100000 * 10 usec).
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1836
1837 static void tg3_link_report(struct tg3 *tp)
1838 {
1839         if (!netif_carrier_ok(tp->dev)) {
1840                 netif_info(tp, link, tp->dev, "Link is down\n");
1841                 tg3_ump_link_report(tp);
1842         } else if (netif_msg_link(tp)) {
1843                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1844                             (tp->link_config.active_speed == SPEED_1000 ?
1845                              1000 :
1846                              (tp->link_config.active_speed == SPEED_100 ?
1847                               100 : 10)),
1848                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1849                              "full" : "half"));
1850
1851                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1852                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1853                             "on" : "off",
1854                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1855                             "on" : "off");
1856
1857                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1858                         netdev_info(tp->dev, "EEE is %s\n",
1859                                     tp->setlpicnt ? "enabled" : "disabled");
1860
1861                 tg3_ump_link_report(tp);
1862         }
1863
1864         tp->link_up = netif_carrier_ok(tp->dev);
1865 }
1866
/* Translate 1000BASE-T pause advertisement bits into the driver's
 * FLOW_CTRL_TX/RX capability mask (IEEE 802.3 Table 28B-3 decode).
 */
static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 cap = adv & ADVERTISE_PAUSE_CAP;
	u32 asym = adv & ADVERTISE_PAUSE_ASYM;
	u32 flowctrl = 0;

	if (cap)
		flowctrl |= FLOW_CTRL_RX;
	/* TX is resolved when exactly one of the two bits is set,
	 * or when symmetric pause alone is advertised.
	 */
	if ((cap && !asym) || (!cap && asym))
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}
1880
/* Map the driver's FLOW_CTRL_TX/RX capability mask onto the
 * 1000BASE-X pause advertisement bits.
 */
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg = 0;

	switch (flow_ctrl & (FLOW_CTRL_TX | FLOW_CTRL_RX)) {
	case FLOW_CTRL_TX | FLOW_CTRL_RX:
		miireg = ADVERTISE_1000XPAUSE;
		break;
	case FLOW_CTRL_TX:
		miireg = ADVERTISE_1000XPSE_ASYM;
		break;
	case FLOW_CTRL_RX:
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		break;
	}

	return miireg;
}
1896
/* Translate 1000BASE-X pause advertisement bits into the driver's
 * FLOW_CTRL_TX/RX capability mask (mirror of the 1000T decoder).
 */
static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 pause = adv & ADVERTISE_1000XPAUSE;
	u32 asym = adv & ADVERTISE_1000XPSE_ASYM;
	u32 flowctrl = 0;

	if (pause)
		flowctrl |= FLOW_CTRL_RX;
	/* TX is resolved when exactly one of the two bits is set. */
	if ((pause && !asym) || (!pause && asym))
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}
1910
/* Resolve negotiated 1000BASE-X flow control from the local and
 * link-partner advertisements (IEEE 802.3 pause resolution).
 */
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u16 both = lcladv & rmtadv;
	u8 cap = 0;

	if (both & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (both & ADVERTISE_1000XPSE_ASYM) {
		/* Only one side can also advertise symmetric pause
		 * here (both sides doing so is the branch above).
		 */
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
		else if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
	}

	return cap;
}
1926
1927 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1928 {
1929         u8 autoneg;
1930         u8 flowctrl = 0;
1931         u32 old_rx_mode = tp->rx_mode;
1932         u32 old_tx_mode = tp->tx_mode;
1933
1934         if (tg3_flag(tp, USE_PHYLIB))
1935                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1936         else
1937                 autoneg = tp->link_config.autoneg;
1938
1939         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1940                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1941                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1942                 else
1943                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1944         } else
1945                 flowctrl = tp->link_config.flowctrl;
1946
1947         tp->link_config.active_flowctrl = flowctrl;
1948
1949         if (flowctrl & FLOW_CTRL_RX)
1950                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1951         else
1952                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1953
1954         if (old_rx_mode != tp->rx_mode)
1955                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1956
1957         if (flowctrl & FLOW_CTRL_TX)
1958                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1959         else
1960                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1961
1962         if (old_tx_mode != tp->tx_mode)
1963                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1964 }
1965
/* phylib adjust_link callback: mirror the PHY's negotiated state into
 * the MAC (port mode, duplex, flow control, IPG timings) and log any
 * link change.  Takes tp->lock itself; do not call with it held.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Flow control is only resolved in full duplex. */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* Only touch MAC_MODE (with the mandatory settle delay) when
	 * the computed mode actually changed.
	 */
	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* 5785 needs a 10 Mbps hint in the MI status register. */
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000BASE-T half duplex uses a larger slot time (0xff). */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Report outside the lock; tg3_link_report logs and notifies
	 * the firmware.
	 */
	if (linkmesg)
		tg3_link_report(tp);
}
2049
/* Connect the MAC to its PHY through phylib and restrict the PHY's
 * supported/advertised feature set to what this MAC can do.
 * Returns 0 on success or when already connected, negative errno
 * on connect failure or an unsupported PHY interface mode.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only devices fall through to the MII mask. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
2097
2098 static void tg3_phy_start(struct tg3 *tp)
2099 {
2100         struct phy_device *phydev;
2101
2102         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2103                 return;
2104
2105         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2106
2107         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2108                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2109                 phydev->speed = tp->link_config.speed;
2110                 phydev->duplex = tp->link_config.duplex;
2111                 phydev->autoneg = tp->link_config.autoneg;
2112                 phydev->advertising = tp->link_config.advertising;
2113         }
2114
2115         phy_start(phydev);
2116
2117         phy_start_aneg(phydev);
2118 }
2119
2120 static void tg3_phy_stop(struct tg3 *tp)
2121 {
2122         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2123                 return;
2124
2125         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2126 }
2127
2128 static void tg3_phy_fini(struct tg3 *tp)
2129 {
2130         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2131                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2132                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2133         }
2134 }
2135
2136 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2137 {
2138         int err;
2139         u32 val;
2140
2141         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2142                 return 0;
2143
2144         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2145                 /* Cannot do read-modify-write on 5401 */
2146                 err = tg3_phy_auxctl_write(tp,
2147                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2148                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2149                                            0x4c20);
2150                 goto done;
2151         }
2152
2153         err = tg3_phy_auxctl_read(tp,
2154                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2155         if (err)
2156                 return err;
2157
2158         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2159         err = tg3_phy_auxctl_write(tp,
2160                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2161
2162 done:
2163         return err;
2164 }
2165
2166 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2167 {
2168         u32 phytest;
2169
2170         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2171                 u32 phy;
2172
2173                 tg3_writephy(tp, MII_TG3_FET_TEST,
2174                              phytest | MII_TG3_FET_SHADOW_EN);
2175                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2176                         if (enable)
2177                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2178                         else
2179                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2180                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2181                 }
2182                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2183         }
2184 }
2185
2186 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2187 {
2188         u32 reg;
2189
2190         if (!tg3_flag(tp, 5705_PLUS) ||
2191             (tg3_flag(tp, 5717_PLUS) &&
2192              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2193                 return;
2194
2195         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2196                 tg3_phy_fet_toggle_apd(tp, enable);
2197                 return;
2198         }
2199
2200         reg = MII_TG3_MISC_SHDW_WREN |
2201               MII_TG3_MISC_SHDW_SCR5_SEL |
2202               MII_TG3_MISC_SHDW_SCR5_LPED |
2203               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2204               MII_TG3_MISC_SHDW_SCR5_SDTL |
2205               MII_TG3_MISC_SHDW_SCR5_C125OE;
2206         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2207                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2208
2209         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2210
2211
2212         reg = MII_TG3_MISC_SHDW_WREN |
2213               MII_TG3_MISC_SHDW_APD_SEL |
2214               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2215         if (enable)
2216                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2217
2218         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2219 }
2220
2221 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2222 {
2223         u32 phy;
2224
2225         if (!tg3_flag(tp, 5705_PLUS) ||
2226             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2227                 return;
2228
2229         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2230                 u32 ephy;
2231
2232                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2233                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2234
2235                         tg3_writephy(tp, MII_TG3_FET_TEST,
2236                                      ephy | MII_TG3_FET_SHADOW_EN);
2237                         if (!tg3_readphy(tp, reg, &phy)) {
2238                                 if (enable)
2239                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2240                                 else
2241                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2242                                 tg3_writephy(tp, reg, phy);
2243                         }
2244                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2245                 }
2246         } else {
2247                 int ret;
2248
2249                 ret = tg3_phy_auxctl_read(tp,
2250                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2251                 if (!ret) {
2252                         if (enable)
2253                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2254                         else
2255                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2256                         tg3_phy_auxctl_write(tp,
2257                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2258                 }
2259         }
2260 }
2261
2262 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2263 {
2264         int ret;
2265         u32 val;
2266
2267         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2268                 return;
2269
2270         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2271         if (!ret)
2272                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2273                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2274 }
2275
/* Program PHY DSP coefficients from the chip's one-time-programmable
 * (OTP) value, if one was stored in tp->phy_otp.  Each bit field of
 * the OTP word is extracted and written to its DSP register.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* DSP registers are only reachable with the SMDSP aux-control
	 * bits set; bail out if that fails.
	 */
	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Restore normal (non-SMDSP) register access. */
	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2312
/* Read the current Energy Efficient Ethernet state from the PHY
 * (Clause 45 registers) and CPMU into *eee, or into tp->eee when
 * eee is NULL.  Returns early — leaving the destination partially
 * updated — if any Clause 45 read fails.
 */
static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	/* Active only when the resolution status reports an exact
	 * 1000T or 100TX EEE link-partner match.
	 */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}
2352
/* Re-evaluate EEE after a link change.  When EEE may be active
 * (autoneg, full duplex, 100/1000 Mbps link), program the LPI exit
 * timer and arm tp->setlpicnt so LPI gets enabled later; otherwise
 * clear the DSP TAP26 setting and disable LPI immediately.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit latency depends on the link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_eee_pull_config(tp, NULL);
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up &&
		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2392
2393 static void tg3_phy_eee_enable(struct tg3 *tp)
2394 {
2395         u32 val;
2396
2397         if (tp->link_config.active_speed == SPEED_1000 &&
2398             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2399              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2400              tg3_flag(tp, 57765_CLASS)) &&
2401             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2402                 val = MII_TG3_DSP_TAP26_ALNOKO |
2403                       MII_TG3_DSP_TAP26_RMRXSTO;
2404                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2405                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2406         }
2407
2408         val = tr32(TG3_CPMU_EEE_MODE);
2409         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2410 }
2411
2412 static int tg3_wait_macro_done(struct tg3 *tp)
2413 {
2414         int limit = 100;
2415
2416         while (limit--) {
2417                 u32 tmp32;
2418
2419                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2420                         if ((tmp32 & 0x1000) == 0)
2421                                 break;
2422                 }
2423         }
2424         if (limit < 0)
2425                 return -EBUSY;
2426
2427         return 0;
2428 }
2429
/* Write a fixed test pattern into each of the four DSP channel
 * blocks and read it back to verify the PHY's internal memory.
 * On a macro timeout *resetp is set so the caller can retry after a
 * PHY reset; on a readback mismatch a fixed recovery sequence is
 * written before returning.  Returns 0 on success, -EBUSY on any
 * failure.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's block and enter write mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Switch the same channel block to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Pattern words come back as low/high pairs. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: write the fixed
				 * 0x4001/0x4005 recovery values at
				 * DSP address 0x000b, then fail.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2495
2496 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2497 {
2498         int chan;
2499
2500         for (chan = 0; chan < 4; chan++) {
2501                 int i;
2502
2503                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2504                              (chan * 0x2000) | 0x0200);
2505                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2506                 for (i = 0; i < 6; i++)
2507                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2508                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2509                 if (tg3_wait_macro_done(tp))
2510                         return -EBUSY;
2511         }
2512
2513         return 0;
2514 }
2515
2516 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2517 {
2518         u32 reg32, phy9_orig;
2519         int retries, do_phy_reset, err;
2520
2521         retries = 10;
2522         do_phy_reset = 1;
2523         do {
2524                 if (do_phy_reset) {
2525                         err = tg3_bmcr_reset(tp);
2526                         if (err)
2527                                 return err;
2528                         do_phy_reset = 0;
2529                 }
2530
2531                 /* Disable transmitter and interrupt.  */
2532                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2533                         continue;
2534
2535                 reg32 |= 0x3000;
2536                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2537
2538                 /* Set full-duplex, 1000 mbps.  */
2539                 tg3_writephy(tp, MII_BMCR,
2540                              BMCR_FULLDPLX | BMCR_SPEED1000);
2541
2542                 /* Set to master mode.  */
2543                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2544                         continue;
2545
2546                 tg3_writephy(tp, MII_CTRL1000,
2547                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2548
2549                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2550                 if (err)
2551                         return err;
2552
2553                 /* Block the PHY control access.  */
2554                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2555
2556                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2557                 if (!err)
2558                         break;
2559         } while (--retries);
2560
2561         err = tg3_phy_reset_chanpat(tp);
2562         if (err)
2563                 return err;
2564
2565         tg3_phydsp_write(tp, 0x8005, 0x0000);
2566
2567         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2568         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2569
2570         tg3_phy_toggle_auxctl_smdsp(tp, false);
2571
2572         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2573
2574         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2575                 reg32 &= ~0x3000;
2576                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2577         } else if (!err)
2578                 err = -EBUSY;
2579
2580         return err;
2581 }
2582
/* Mark the link as down: clear the net-stack carrier state and the
 * driver's cached link flag together so the two stay consistent.
 */
static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}
2588
2589 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2590 {
2591         if (tg3_flag(tp, ENABLE_ASF))
2592                 netdev_warn(tp->dev,
2593                             "Management side-band traffic will be interrupted during phy settings change\n");
2594 }
2595
/* Reset the tigon3 PHY and apply all chip-specific post-reset
 * workarounds.
 *
 * NOTE(review): the previous comment here mentioned a FORCE argument,
 * but this function takes none; the reset is unconditional.
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	/* 5906: take the embedded PHY out of IDDQ (low-power) mode first. */
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR is read twice - presumably to flush latched status bits
	 * per MII convention; TODO confirm.  Either read failing means
	 * the PHY is not responding.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* Report loss of link before the PHY goes down. */
	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the special reset-and-retrain sequence. */
	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* 5784 (non-AX): temporarily clear the CPMU 10MB-RX-only mode
	 * around the BMCR reset; it is restored below.
	 */
	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	/* Restore the CPMU mode cleared above, after a DSP tweak. */
	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	/* 5784-AX/5761-AX: back off the 12.5MHz 1000Mb MAC clock if it
	 * is currently selected.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* 5717+ serdes devices skip the copper-PHY tuning below. */
	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Per-erratum DSP writes, each gated by its PHY flag. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		/* NOTE(review): written twice in the original code;
		 * presumably required by the 5704 A0 erratum - confirm.
		 */
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2739
2740 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2741 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2742 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2743                                           TG3_GPIO_MSG_NEED_VAUX)
2744 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2745         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2746          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2747          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2748          (TG3_GPIO_MSG_DRVR_PRES << 12))
2749
2750 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2751         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2752          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2753          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2754          (TG3_GPIO_MSG_NEED_VAUX << 12))
2755
2756 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2757 {
2758         u32 status, shift;
2759
2760         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2761             tg3_asic_rev(tp) == ASIC_REV_5719)
2762                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2763         else
2764                 status = tr32(TG3_CPMU_DRV_STATUS);
2765
2766         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2767         status &= ~(TG3_GPIO_MSG_MASK << shift);
2768         status |= (newstat << shift);
2769
2770         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2771             tg3_asic_rev(tp) == ASIC_REV_5719)
2772                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2773         else
2774                 tw32(TG3_CPMU_DRV_STATUS, status);
2775
2776         return status >> TG3_APE_GPIO_MSG_SHIFT;
2777 }
2778
2779 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2780 {
2781         if (!tg3_flag(tp, IS_NIC))
2782                 return 0;
2783
2784         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2785             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2786             tg3_asic_rev(tp) == ASIC_REV_5720) {
2787                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2788                         return -EIO;
2789
2790                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2791
2792                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2793                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2794
2795                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2796         } else {
2797                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2798                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2799         }
2800
2801         return 0;
2802 }
2803
2804 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2805 {
2806         u32 grc_local_ctrl;
2807
2808         if (!tg3_flag(tp, IS_NIC) ||
2809             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2810             tg3_asic_rev(tp) == ASIC_REV_5701)
2811                 return;
2812
2813         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2814
2815         tw32_wait_f(GRC_LOCAL_CTRL,
2816                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2817                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2818
2819         tw32_wait_f(GRC_LOCAL_CTRL,
2820                     grc_local_ctrl,
2821                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2822
2823         tw32_wait_f(GRC_LOCAL_CTRL,
2824                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2825                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2826 }
2827
/* Switch the device power source to auxiliary power (Vaux) by stepping
 * the GRC local-control GPIOs through a board-specific sequence, with
 * the standard settle delay after each write.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* 5700/5701: a single write asserting GPIO0/1 suffices. */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Raise GPIO0 only after the first write has settled. */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2904
2905 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2906 {
2907         u32 msg = 0;
2908
2909         /* Serialize power state transitions */
2910         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2911                 return;
2912
2913         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2914                 msg = TG3_GPIO_MSG_NEED_VAUX;
2915
2916         msg = tg3_set_function_status(tp, msg);
2917
2918         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2919                 goto done;
2920
2921         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2922                 tg3_pwrsrc_switch_to_vaux(tp);
2923         else
2924                 tg3_pwrsrc_die_with_vmain(tp);
2925
2926 done:
2927         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2928 }
2929
2930 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2931 {
2932         bool need_vaux = false;
2933
2934         /* The GPIOs do something completely different on 57765. */
2935         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2936                 return;
2937
2938         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2939             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2940             tg3_asic_rev(tp) == ASIC_REV_5720) {
2941                 tg3_frob_aux_power_5717(tp, include_wol ?
2942                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2943                 return;
2944         }
2945
2946         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2947                 struct net_device *dev_peer;
2948
2949                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2950
2951                 /* remove_one() may have been run on the peer. */
2952                 if (dev_peer) {
2953                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2954
2955                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2956                                 return;
2957
2958                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2959                             tg3_flag(tp_peer, ENABLE_ASF))
2960                                 need_vaux = true;
2961                 }
2962         }
2963
2964         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2965             tg3_flag(tp, ENABLE_ASF))
2966                 need_vaux = true;
2967
2968         if (need_vaux)
2969                 tg3_pwrsrc_switch_to_vaux(tp);
2970         else
2971                 tg3_pwrsrc_die_with_vmain(tp);
2972 }
2973
2974 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2975 {
2976         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2977                 return 1;
2978         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2979                 if (speed != SPEED_10)
2980                         return 1;
2981         } else if (speed == SPEED_10)
2982                 return 1;
2983
2984         return 0;
2985 }
2986
2987 static bool tg3_phy_power_bug(struct tg3 *tp)
2988 {
2989         switch (tg3_asic_rev(tp)) {
2990         case ASIC_REV_5700:
2991         case ASIC_REV_5704:
2992                 return true;
2993         case ASIC_REV_5780:
2994                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2995                         return true;
2996                 return false;
2997         case ASIC_REV_5717:
2998                 if (!tp->pci_fn)
2999                         return true;
3000                 return false;
3001         case ASIC_REV_5719:
3002         case ASIC_REV_5720:
3003                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3004                     !tp->pci_fn)
3005                         return true;
3006                 return false;
3007         }
3008
3009         return false;
3010 }
3011
/* Put the PHY into its lowest safe power state, using the mechanism
 * appropriate for the PHY/chip combination.  Skipped entirely when
 * the link must stay up during power-down (KEEP_LINK_ON_PWRDN).
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		/* Serdes: only 5704 needs special handling here. */
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			/* NOTE(review): bit 15 of MAC_SERDES_CFG has no
			 * named constant here - meaning unconfirmed.
			 */
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906: reset the PHY then drop it into IDDQ mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		/* FET PHYs: enable standby power-down via the shadow
		 * AUXMODE4 register, bracketed by SHADOW_EN.
		 */
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		/* Generic copper PHY: LEDs off and aux power control
		 * set for isolated low-power operation.
		 */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	/* 5784-AX/5761-AX: select the 12.5MHz 1000Mb MAC clock before
	 * powering the PHY down.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3084
3085 /* tp->lock is held. */
3086 static int tg3_nvram_lock(struct tg3 *tp)
3087 {
3088         if (tg3_flag(tp, NVRAM)) {
3089                 int i;
3090
3091                 if (tp->nvram_lock_cnt == 0) {
3092                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3093                         for (i = 0; i < 8000; i++) {
3094                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3095                                         break;
3096                                 udelay(20);
3097                         }
3098                         if (i == 8000) {
3099                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3100                                 return -ENODEV;
3101                         }
3102                 }
3103                 tp->nvram_lock_cnt++;
3104         }
3105         return 0;
3106 }
3107
3108 /* tp->lock is held. */
3109 static void tg3_nvram_unlock(struct tg3 *tp)
3110 {
3111         if (tg3_flag(tp, NVRAM)) {
3112                 if (tp->nvram_lock_cnt > 0)
3113                         tp->nvram_lock_cnt--;
3114                 if (tp->nvram_lock_cnt == 0)
3115                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3116         }
3117 }
3118
3119 /* tp->lock is held. */
3120 static void tg3_enable_nvram_access(struct tg3 *tp)
3121 {
3122         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3123                 u32 nvaccess = tr32(NVRAM_ACCESS);
3124
3125                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3126         }
3127 }
3128
3129 /* tp->lock is held. */
3130 static void tg3_disable_nvram_access(struct tg3 *tp)
3131 {
3132         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3133                 u32 nvaccess = tr32(NVRAM_ACCESS);
3134
3135                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3136         }
3137 }
3138
/* Read one 32-bit word from the legacy SEEPROM interface (used when
 * the NVRAM flag is not set).  @offset must be dword-aligned and
 * within the address mask.  Returns 0 with *val filled in, -EINVAL on
 * a bad offset, or -EBUSY if the controller never signals completion.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated bits of the address register; clear the
	 * address, device-id and read-direction fields before reuse.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion for up to ~1 second. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
3178
3179 #define NVRAM_CMD_TIMEOUT 10000
3180
3181 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3182 {
3183         int i;
3184
3185         tw32(NVRAM_CMD, nvram_cmd);
3186         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3187                 udelay(10);
3188                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3189                         udelay(10);
3190                         break;
3191                 }
3192         }
3193
3194         if (i == NVRAM_CMD_TIMEOUT)
3195                 return -EBUSY;
3196
3197         return 0;
3198 }
3199
3200 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3201 {
3202         if (tg3_flag(tp, NVRAM) &&
3203             tg3_flag(tp, NVRAM_BUFFERED) &&
3204             tg3_flag(tp, FLASH) &&
3205             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3206             (tp->nvram_jedecnum == JEDEC_ATMEL))
3207
3208                 addr = ((addr / tp->nvram_pagesize) <<
3209                         ATMEL_AT45DB0X1B_PAGE_POS) +
3210                        (addr % tp->nvram_pagesize);
3211
3212         return addr;
3213 }
3214
3215 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3216 {
3217         if (tg3_flag(tp, NVRAM) &&
3218             tg3_flag(tp, NVRAM_BUFFERED) &&
3219             tg3_flag(tp, FLASH) &&
3220             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3221             (tp->nvram_jedecnum == JEDEC_ATMEL))
3222
3223                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3224                         tp->nvram_pagesize) +
3225                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3226
3227         return addr;
3228 }
3229
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 *
 * Reads one 32-bit word at @offset.  Takes and releases the NVRAM
 * arbitration lock around the access.  Returns 0 with *val filled in,
 * or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	/* Fall back to the legacy SEEPROM interface when there is no
	 * NVRAM controller.
	 */
	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
3267
3268 /* Ensures NVRAM data is in bytestream format. */
3269 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3270 {
3271         u32 v;
3272         int res = tg3_nvram_read(tp, offset, &v);
3273         if (!res)
3274                 *val = cpu_to_be32(v);
3275         return res;
3276 }
3277
/* Write @len bytes from @buf to the legacy SEEPROM interface starting
 * at @offset, one 32-bit word at a time.  Returns 0 on success or
 * -EBUSY if the controller fails to complete a word write.
 * NOTE(review): callers apparently guarantee dword alignment of
 * offset/len (see the "dword aligned" comment on the sibling writer) -
 * confirm before reuse.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Clear any stale COMPLETE status before starting. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion for up to ~1 second per word. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3326
/* offset and length are dword aligned.
 *
 * Write to unbuffered flash using a read-modify-write cycle per page:
 * read the whole page containing 'offset' into a scratch buffer, merge in
 * the caller's data, erase the page, then rewrite the entire page word by
 * word.  Returns 0 on success or a negative errno from the NVRAM read or
 * command execution.  Note: a failure of tg3_nvram_exec_cmd() on the
 * WREN/ERASE commands breaks out with ret still 0 from the earlier reads
 * (the loop simply stops early).
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one full flash page. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start address of the page containing 'offset'. */
		phy_addr = offset & ~pagemask;

		/* Read the existing page contents so the unmodified words
		 * survive the erase.
		 */
		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		/* Merge the caller's data into the page image. */
		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		/* Advance to the start of the next page. */
		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Rewrite the full page, flagging the first and last words
		 * of the burst with FIRST/LAST.
		 */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	/* Always re-assert write-disable before returning, even on error. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
3425
/* offset and length are dword aligned.
 *
 * Write to buffered flash (or non-flash EEPROM) one 32-bit word at a time.
 * No erase cycle is needed; each word is pushed with a command whose
 * FIRST/LAST flags mark page and transfer boundaries.  Returns 0 on
 * success or the errno from tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		/* Byte offset of this word within its flash page. */
		page_off = offset % tp->nvram_pagesize;

		/* Translate the logical offset to a physical flash address. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST at a page boundary or the start of the transfer;
		 * LAST at the end of a page or the end of the transfer.
		 */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* 57765+ flash parts latch the address once per burst, so
		 * only reprogram NVRAM_ADDR when starting a burst (or when
		 * not on that hardware).
		 */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* Older ST-JEDEC parts need an explicit write-enable
		 * command before each burst.
		 */
		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3480
3481 /* offset and length are dword aligned */
3482 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3483 {
3484         int ret;
3485
3486         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3487                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3488                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3489                 udelay(40);
3490         }
3491
3492         if (!tg3_flag(tp, NVRAM)) {
3493                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3494         } else {
3495                 u32 grc_mode;
3496
3497                 ret = tg3_nvram_lock(tp);
3498                 if (ret)
3499                         return ret;
3500
3501                 tg3_enable_nvram_access(tp);
3502                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3503                         tw32(NVRAM_WRITE1, 0x406);
3504
3505                 grc_mode = tr32(GRC_MODE);
3506                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3507
3508                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3509                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3510                                 buf);
3511                 } else {
3512                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3513                                 buf);
3514                 }
3515
3516                 grc_mode = tr32(GRC_MODE);
3517                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3518
3519                 tg3_disable_nvram_access(tp);
3520                 tg3_nvram_unlock(tp);
3521         }
3522
3523         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3524                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3525                 udelay(40);
3526         }
3527
3528         return ret;
3529 }
3530
3531 #define RX_CPU_SCRATCH_BASE     0x30000
3532 #define RX_CPU_SCRATCH_SIZE     0x04000
3533 #define TX_CPU_SCRATCH_BASE     0x34000
3534 #define TX_CPU_SCRATCH_SIZE     0x04000
3535
3536 /* tp->lock is held. */
3537 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3538 {
3539         int i;
3540         const int iters = 10000;
3541
3542         for (i = 0; i < iters; i++) {
3543                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3544                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3545                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3546                         break;
3547         }
3548
3549         return (i == iters) ? -EBUSY : 0;
3550 }
3551
/* tp->lock is held.
 *
 * Halt the RX CPU.  After the polled pause attempt, one final
 * unconditional halt request is posted (with a flush and settle delay),
 * regardless of whether the poll succeeded.  Returns the poll result.
 */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
	udelay(10);

	return rc;
}
3563
/* tp->lock is held.
 *
 * Halt the TX CPU; thin wrapper around tg3_pause_cpu().
 */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}
3569
/* tp->lock is held.
 *
 * Resume the embedded CPU at 'cpu_base' by clearing its state and mode
 * (the flushing write to CPU_MODE drops the HALT bit).
 */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}
3576
/* tp->lock is held.
 *
 * Resume the RX CPU; thin wrapper around tg3_resume_cpu().
 */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}
3582
/* tp->lock is held.
 *
 * Halt the RX or TX embedded CPU, accounting for chip variants that have
 * no such CPU or use a virtual CPU.  Returns 0 on success, -ENODEV if the
 * halt request timed out.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	/* 5705+ chips have no separate TX CPU (see tg3_load_firmware_cpu). */
	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906 halts its virtual CPU through GRC instead. */
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3620
3621 static int tg3_fw_data_len(struct tg3 *tp,
3622                            const struct tg3_firmware_hdr *fw_hdr)
3623 {
3624         int fw_len;
3625
3626         /* Non fragmented firmware have one firmware header followed by a
3627          * contiguous chunk of data to be written. The length field in that
3628          * header is not the length of data to be written but the complete
3629          * length of the bss. The data length is determined based on
3630          * tp->fw->size minus headers.
3631          *
3632          * Fragmented firmware have a main header followed by multiple
3633          * fragments. Each fragment is identical to non fragmented firmware
3634          * with a firmware header followed by a contiguous chunk of data. In
3635          * the main header, the length field is unused and set to 0xffffffff.
3636          * In each fragment header the length is the entire size of that
3637          * fragment i.e. fragment data + header length. Data length is
3638          * therefore length field in the header minus TG3_FW_HDR_LEN.
3639          */
3640         if (tp->fw_len == 0xffffffff)
3641                 fw_len = be32_to_cpu(fw_hdr->len);
3642         else
3643                 fw_len = tp->fw->size;
3644
3645         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3646 }
3647
/* tp->lock is held.
 *
 * Download a firmware image (possibly fragmented, see tg3_fw_data_len())
 * into the scratch memory of the embedded CPU at 'cpu_base'.  On non-57766
 * parts the CPU is first halted and its scratch area zeroed; on 57766 the
 * caller has already paused the CPU and the main header is skipped to get
 * to the first fragment.  Returns 0 on success or a negative errno from
 * the halt attempt.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	/* 5705+ parts have no TX CPU; refuse rather than BUG. */
	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* Pick the memory-write primitive appropriate for the chip. */
	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		/* Zero the scratch area, then hold the CPU halted while
		 * the image is written.
		 */
		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	/* Copy each fragment's data words to its base address within the
	 * scratch region (base_addr's low 16 bits are the offset).
	 */
	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}
3713
/* tp->lock is held.
 *
 * Set the program counter of the embedded CPU at 'cpu_base' to 'pc',
 * retrying (with an explicit halt between attempts) until the PC reads
 * back correctly.  Returns 0 on success, -EBUSY after 5 failed attempts.
 */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}
3734
/* tp->lock is held.
 *
 * Load the 5701 A0 workaround firmware into both the RX and TX CPU
 * scratch areas, then start only the RX CPU at the image's base address.
 * Returns 0 on success, a negative errno on load failure, or -ENODEV if
 * the RX CPU PC could not be set.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
				be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}
3776
3777 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3778 {
3779         const int iters = 1000;
3780         int i;
3781         u32 val;
3782
3783         /* Wait for boot code to complete initialization and enter service
3784          * loop. It is then safe to download service patches
3785          */
3786         for (i = 0; i < iters; i++) {
3787                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3788                         break;
3789
3790                 udelay(10);
3791         }
3792
3793         if (i == iters) {
3794                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3795                 return -EBUSY;
3796         }
3797
3798         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3799         if (val & 0xff) {
3800                 netdev_warn(tp->dev,
3801                             "Other patches exist. Not downloading EEE patch\n");
3802                 return -EEXIST;
3803         }
3804
3805         return 0;
3806 }
3807
/* tp->lock is held.
 *
 * Download the 57766 service patch (EEE patch) firmware.  Only applies
 * when the part has no NVRAM, the boot code is idle, a firmware image is
 * loaded, and the image's base address matches the expected patch base.
 * Silently returns on any precondition failure.
 */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	tg3_rxcpu_resume(tp);
}
3848
/* tp->lock is held.
 *
 * Load the TSO offload firmware into the appropriate CPU (RX CPU using
 * the 5705 mbuf pool as scratch, or the TX CPU scratch area on other
 * chips) and start it at the image base address.  No-op unless the FW_TSO
 * flag is set.  Returns 0 on success, a negative errno on load failure,
 * or -ENODEV if the CPU PC could not be set.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_resume_cpu(tp, cpu_base);
	return 0;
}
3898
3899
/* tp->lock is held.
 *
 * Program the device MAC address into the four MAC address register
 * pairs (optionally skipping slot 1, which presumably is owned by
 * management firmware when skip_mac_1 is set — verify against callers),
 * into the twelve extended-address slots on 5703/5704, and derive the TX
 * backoff seed from the address bytes.
 */
static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	/* Split the 6-byte address: bytes 0-1 in the high register,
	 * bytes 2-5 in the low register (big-endian within each).
	 */
	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	/* 5703/5704 also carry 12 extended MAC address slots. */
	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	/* Seed the TX backoff generator from the byte sum of the address
	 * so different NICs back off differently.
	 */
	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
3936
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.  Re-writes the cached MISC_HOST_CTRL config-space value,
	 * which may have been lost across a power-state transition.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3946
3947 static int tg3_power_up(struct tg3 *tp)
3948 {
3949         int err;
3950
3951         tg3_enable_register_access(tp);
3952
3953         err = pci_set_power_state(tp->pdev, PCI_D0);
3954         if (!err) {
3955                 /* Switch out of Vaux if it is a NIC */
3956                 tg3_pwrsrc_switch_to_vmain(tp);
3957         } else {
3958                 netdev_err(tp->dev, "Transition to D0 failed\n");
3959         }
3960
3961         return err;
3962 }
3963
3964 static int tg3_setup_phy(struct tg3 *, bool);
3965
3966 static int tg3_power_down_prepare(struct tg3 *tp)
3967 {
3968         u32 misc_host_ctrl;
3969         bool device_should_wake, do_low_power;
3970
3971         tg3_enable_register_access(tp);
3972
3973         /* Restore the CLKREQ setting. */
3974         if (tg3_flag(tp, CLKREQ_BUG))
3975                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3976                                          PCI_EXP_LNKCTL_CLKREQ_EN);
3977
3978         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3979         tw32(TG3PCI_MISC_HOST_CTRL,
3980              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3981
3982         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3983                              tg3_flag(tp, WOL_ENABLE);
3984
3985         if (tg3_flag(tp, USE_PHYLIB)) {
3986                 do_low_power = false;
3987                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3988                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3989                         struct phy_device *phydev;
3990                         u32 phyid, advertising;
3991
3992                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3993
3994                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3995
3996                         tp->link_config.speed = phydev->speed;
3997                         tp->link_config.duplex = phydev->duplex;
3998                         tp->link_config.autoneg = phydev->autoneg;
3999                         tp->link_config.advertising = phydev->advertising;
4000
4001                         advertising = ADVERTISED_TP |
4002                                       ADVERTISED_Pause |
4003                                       ADVERTISED_Autoneg |
4004                                       ADVERTISED_10baseT_Half;
4005
4006                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4007                                 if (tg3_flag(tp, WOL_SPEED_100MB))
4008                                         advertising |=
4009                                                 ADVERTISED_100baseT_Half |
4010                                                 ADVERTISED_100baseT_Full |
4011                                                 ADVERTISED_10baseT_Full;
4012                                 else
4013                                         advertising |= ADVERTISED_10baseT_Full;
4014                         }
4015
4016                         phydev->advertising = advertising;
4017
4018                         phy_start_aneg(phydev);
4019
4020                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4021                         if (phyid != PHY_ID_BCMAC131) {
4022                                 phyid &= PHY_BCM_OUI_MASK;
4023                                 if (phyid == PHY_BCM_OUI_1 ||
4024                                     phyid == PHY_BCM_OUI_2 ||
4025                                     phyid == PHY_BCM_OUI_3)
4026                                         do_low_power = true;
4027                         }
4028                 }
4029         } else {
4030                 do_low_power = true;
4031
4032                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4033                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4034
4035                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4036                         tg3_setup_phy(tp, false);
4037         }
4038
4039         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4040                 u32 val;
4041
4042                 val = tr32(GRC_VCPU_EXT_CTRL);
4043                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4044         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4045                 int i;
4046                 u32 val;
4047
4048                 for (i = 0; i < 200; i++) {
4049                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4050                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4051                                 break;
4052                         msleep(1);
4053                 }
4054         }
4055         if (tg3_flag(tp, WOL_CAP))
4056                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4057                                                      WOL_DRV_STATE_SHUTDOWN |
4058                                                      WOL_DRV_WOL |
4059                                                      WOL_SET_MAGIC_PKT);
4060
4061         if (device_should_wake) {
4062                 u32 mac_mode;
4063
4064                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4065                         if (do_low_power &&
4066                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4067                                 tg3_phy_auxctl_write(tp,
4068                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4069                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4070                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4071                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4072                                 udelay(40);
4073                         }
4074
4075                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4076                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4077                         else if (tp->phy_flags &
4078                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4079                                 if (tp->link_config.active_speed == SPEED_1000)
4080                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4081                                 else
4082                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4083                         } else
4084                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4085
4086                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4087                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4088                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4089                                              SPEED_100 : SPEED_10;
4090                                 if (tg3_5700_link_polarity(tp, speed))
4091                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4092                                 else
4093                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4094                         }
4095                 } else {
4096                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4097                 }
4098
4099                 if (!tg3_flag(tp, 5750_PLUS))
4100                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4101
4102                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4103                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4104                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4105                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4106
4107                 if (tg3_flag(tp, ENABLE_APE))
4108                         mac_mode |= MAC_MODE_APE_TX_EN |
4109                                     MAC_MODE_APE_RX_EN |
4110                                     MAC_MODE_TDE_ENABLE;
4111
4112                 tw32_f(MAC_MODE, mac_mode);
4113                 udelay(100);
4114
4115                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4116                 udelay(10);
4117         }
4118
4119         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4120             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4121              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4122                 u32 base_val;
4123
4124                 base_val = tp->pci_clock_ctrl;
4125                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4126                              CLOCK_CTRL_TXCLK_DISABLE);
4127
4128                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4129                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4130         } else if (tg3_flag(tp, 5780_CLASS) ||
4131                    tg3_flag(tp, CPMU_PRESENT) ||
4132                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4133                 /* do nothing */
4134         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4135                 u32 newbits1, newbits2;
4136
4137                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4138                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4139                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4140                                     CLOCK_CTRL_TXCLK_DISABLE |
4141                                     CLOCK_CTRL_ALTCLK);
4142                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4143                 } else if (tg3_flag(tp, 5705_PLUS)) {
4144                         newbits1 = CLOCK_CTRL_625_CORE;
4145                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4146                 } else {
4147                         newbits1 = CLOCK_CTRL_ALTCLK;
4148                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4149                 }
4150
4151                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4152                             40);
4153
4154                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4155                             40);
4156
4157                 if (!tg3_flag(tp, 5705_PLUS)) {
4158                         u32 newbits3;
4159
4160                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4161                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4162                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4163                                             CLOCK_CTRL_TXCLK_DISABLE |
4164                                             CLOCK_CTRL_44MHZ_CORE);
4165                         } else {
4166                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4167                         }
4168
4169                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4170                                     tp->pci_clock_ctrl | newbits3, 40);
4171                 }
4172         }
4173
4174         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4175                 tg3_power_down_phy(tp, do_low_power);
4176
4177         tg3_frob_aux_power(tp, true);
4178
4179         /* Workaround for unstable PLL clock */
4180         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4181             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4182              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4183                 u32 val = tr32(0x7d00);
4184
4185                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4186                 tw32(0x7d00, val);
4187                 if (!tg3_flag(tp, ENABLE_ASF)) {
4188                         int err;
4189
4190                         err = tg3_nvram_lock(tp);
4191                         tg3_halt_cpu(tp, RX_CPU_BASE);
4192                         if (!err)
4193                                 tg3_nvram_unlock(tp);
4194                 }
4195         }
4196
4197         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4198
4199         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4200
4201         return 0;
4202 }
4203
/* Finalize power-down: run the chip shutdown sequence, arm PCI wake
 * generation if the user enabled WOL, then drop the device into D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	/* Enable PME from D3 only when wake-on-LAN was requested. */
	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
4211
4212 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4213 {
4214         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4215         case MII_TG3_AUX_STAT_10HALF:
4216                 *speed = SPEED_10;
4217                 *duplex = DUPLEX_HALF;
4218                 break;
4219
4220         case MII_TG3_AUX_STAT_10FULL:
4221                 *speed = SPEED_10;
4222                 *duplex = DUPLEX_FULL;
4223                 break;
4224
4225         case MII_TG3_AUX_STAT_100HALF:
4226                 *speed = SPEED_100;
4227                 *duplex = DUPLEX_HALF;
4228                 break;
4229
4230         case MII_TG3_AUX_STAT_100FULL:
4231                 *speed = SPEED_100;
4232                 *duplex = DUPLEX_FULL;
4233                 break;
4234
4235         case MII_TG3_AUX_STAT_1000HALF:
4236                 *speed = SPEED_1000;
4237                 *duplex = DUPLEX_HALF;
4238                 break;
4239
4240         case MII_TG3_AUX_STAT_1000FULL:
4241                 *speed = SPEED_1000;
4242                 *duplex = DUPLEX_FULL;
4243                 break;
4244
4245         default:
4246                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4247                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4248                                  SPEED_10;
4249                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4250                                   DUPLEX_HALF;
4251                         break;
4252                 }
4253                 *speed = SPEED_UNKNOWN;
4254                 *duplex = DUPLEX_UNKNOWN;
4255                 break;
4256         }
4257 }
4258
4259 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4260 {
4261         int err = 0;
4262         u32 val, new_adv;
4263
4264         new_adv = ADVERTISE_CSMA;
4265         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4266         new_adv |= mii_advertise_flowctrl(flowctrl);
4267
4268         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4269         if (err)
4270                 goto done;
4271
4272         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4273                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4274
4275                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4276                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4277                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4278
4279                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4280                 if (err)
4281                         goto done;
4282         }
4283
4284         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4285                 goto done;
4286
4287         tw32(TG3_CPMU_EEE_MODE,
4288              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4289
4290         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4291         if (!err) {
4292                 u32 err2;
4293
4294                 val = 0;
4295                 /* Advertise 100-BaseTX EEE ability */
4296                 if (advertise & ADVERTISED_100baseT_Full)
4297                         val |= MDIO_AN_EEE_ADV_100TX;
4298                 /* Advertise 1000-BaseT EEE ability */
4299                 if (advertise & ADVERTISED_1000baseT_Full)
4300                         val |= MDIO_AN_EEE_ADV_1000T;
4301
4302                 if (!tp->eee.eee_enabled) {
4303                         val = 0;
4304                         tp->eee.advertised = 0;
4305                 } else {
4306                         tp->eee.advertised = advertise &
4307                                              (ADVERTISED_100baseT_Full |
4308                                               ADVERTISED_1000baseT_Full);
4309                 }
4310
4311                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4312                 if (err)
4313                         val = 0;
4314
4315                 switch (tg3_asic_rev(tp)) {
4316                 case ASIC_REV_5717:
4317                 case ASIC_REV_57765:
4318                 case ASIC_REV_57766:
4319                 case ASIC_REV_5719:
4320                         /* If we advertised any eee advertisements above... */
4321                         if (val)
4322                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4323                                       MII_TG3_DSP_TAP26_RMRXSTO |
4324                                       MII_TG3_DSP_TAP26_OPCSINPT;
4325                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4326                         /* Fall through */
4327                 case ASIC_REV_5720:
4328                 case ASIC_REV_5762:
4329                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4330                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4331                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4332                 }
4333
4334                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4335                 if (!err)
4336                         err = err2;
4337         }
4338
4339 done:
4340         return err;
4341 }
4342
/* Kick off link establishment on a copper PHY according to the current
 * link_config: either (re)start autonegotiation with the appropriate
 * advertisement mask, or force a fixed speed/duplex via BMCR.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Low-power (WOL) path: advertise only the slow
			 * speeds, plus 100 Mb when WOL at 100 is allowed
			 * and 1G when the PHY may run gigabit from Vaux.
			 */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
				adv |= ADVERTISED_1000baseT_Half |
				       ADVERTISED_1000baseT_Full;

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		/* Build the forced-mode BMCR value from speed/duplex. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force the link down via loopback, then poll
			 * (up to ~15 ms) for link-down before writing
			 * the new forced BMCR value.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR link status is latched-low; read
				 * twice so the second read reflects the
				 * current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
4436
/* Reconstruct tp->link_config (autoneg mode, speed, duplex, advertising
 * and flow control) from the PHY's current register contents, so that
 * settings already programmed (e.g. by boot firmware) can be adopted
 * without forcing a renegotiation.  Returns 0 on success or a negative
 * error when the register state cannot be decoded/read.
 */
static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		/* Forced mode: decode speed/duplex straight from BMCR. */
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			/* Serdes ports do not support a forced 10 Mbps. */
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			/* Fall through */
		default:
			/* Both speed bits set, or 1000 on a 10/100-only
			 * PHY: invalid, bail with -EIO.
			 */
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		/* Copper: 10/100 abilities and pause bits are in
		 * MII_ADVERTISE.
		 */
		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			/* Copper gigabit abilities live in MII_CTRL1000. */
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			/* Serdes: 1000BASE-X abilities and pause bits are
			 * both encoded in MII_ADVERTISE.
			 */
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}
4533
4534 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4535 {
4536         int err;
4537
4538         /* Turn off tap power management. */
4539         /* Set Extended packet length bit */
4540         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4541
4542         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4543         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4544         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4545         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4546         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4547
4548         udelay(40);
4549
4550         return err;
4551 }
4552
4553 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4554 {
4555         struct ethtool_eee eee;
4556
4557         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4558                 return true;
4559
4560         tg3_eee_pull_config(tp, &eee);
4561
4562         if (tp->eee.eee_enabled) {
4563                 if (tp->eee.advertised != eee.advertised ||
4564                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4565                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4566                         return false;
4567         } else {
4568                 /* EEE is disabled but we're advertising */
4569                 if (eee.advertised)
4570                         return false;
4571         }
4572
4573         return true;
4574 }
4575
/* Check that the PHY's programmed advertisements (MII_ADVERTISE and,
 * on gigabit-capable PHYs, MII_CTRL1000) match tp->link_config.  On a
 * successful MII_ADVERTISE read, *lcladv holds the raw register value
 * for the caller's flow-control resolution.  Returns true on match.
 */
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		/* Pause advertisement only matters in full duplex. */
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
			/* 5701 A0/B0 are configured as forced gigabit
			 * master (see tg3_phy_autoneg_cfg), so expect the
			 * master-mode bits to be set as well.
			 */
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
4619
4620 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4621 {
4622         u32 lpeth = 0;
4623
4624         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4625                 u32 val;
4626
4627                 if (tg3_readphy(tp, MII_STAT1000, &val))
4628                         return false;
4629
4630                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4631         }
4632
4633         if (tg3_readphy(tp, MII_LPA, rmtadv))
4634                 return false;
4635
4636         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4637         tp->link_config.rmt_adv = lpeth;
4638
4639         return true;
4640 }
4641
4642 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4643 {
4644         if (curr_link_up != tp->link_up) {
4645                 if (curr_link_up) {
4646                         netif_carrier_on(tp->dev);
4647                 } else {
4648                         netif_carrier_off(tp->dev);
4649                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4650                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4651                 }
4652
4653                 tg3_link_report(tp);
4654                 return true;
4655         }
4656
4657         return false;
4658 }
4659
/* Mask MAC event generation and write back the latched sync/config/
 * MI-completion/link-state change bits in MAC_STATUS so stale change
 * indications are not acted upon later.
 */
static void tg3_clear_mac_status(struct tg3 *tp)
{
	tw32(MAC_EVENT, 0);

	/* Flushed write plus 40us settle time for the posted write. */
	tw32_f(MAC_STATUS,
	       MAC_STATUS_SYNC_CHANGED |
	       MAC_STATUS_CFG_CHANGED |
	       MAC_STATUS_MI_COMPLETION |
	       MAC_STATUS_LNKSTATE_CHANGED);
	udelay(40);
}
4671
/* Program the CPMU EEE (Energy Efficient Ethernet) link-idle, control,
 * mode and debounce-timer registers from tp->eee.  When EEE is disabled
 * the mode register is cleared entirely.
 */
static void tg3_setup_eee(struct tg3 *tp)
{
	u32 val;

	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
	/* 57765 A0 additionally needs the APE TX idle qualifier. */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

	tw32_f(TG3_CPMU_EEE_CTRL,
	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

	/* Assemble the mode bits; TX LPI is conditional on the user's
	 * tx_lpi_enabled setting.
	 */
	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
	      TG3_CPMU_EEEMD_LPI_IN_RX |
	      TG3_CPMU_EEEMD_EEE_ENABLE;

	if (tg3_asic_rev(tp) != ASIC_REV_5717)
		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

	if (tg3_flag(tp, ENABLE_APE))
		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

	/* Only program the assembled mode bits when EEE is enabled. */
	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);

	tw32_f(TG3_CPMU_EEE_DBTMR1,
	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
	       (tp->eee.tx_lpi_timer & 0xffff));

	tw32_f(TG3_CPMU_EEE_DBTMR2,
	       TG3_CPMU_DBTMR2_APE_TX_2047US |
	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
4707
4708 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4709 {
4710         bool current_link_up;
4711         u32 bmsr, val;
4712         u32 lcl_adv, rmt_adv;
4713         u16 current_speed;
4714         u8 current_duplex;
4715         int i, err;
4716
4717         tg3_clear_mac_status(tp);
4718
4719         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4720                 tw32_f(MAC_MI_MODE,
4721                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4722                 udelay(80);
4723         }
4724
4725         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4726
4727         /* Some third-party PHYs need to be reset on link going
4728          * down.
4729          */
4730         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4731              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4732              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4733             tp->link_up) {
4734                 tg3_readphy(tp, MII_BMSR, &bmsr);
4735                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4736                     !(bmsr & BMSR_LSTATUS))
4737                         force_reset = true;
4738         }
4739         if (force_reset)
4740                 tg3_phy_reset(tp);
4741
4742         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4743                 tg3_readphy(tp, MII_BMSR, &bmsr);
4744                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4745                     !tg3_flag(tp, INIT_COMPLETE))
4746                         bmsr = 0;
4747
4748                 if (!(bmsr & BMSR_LSTATUS)) {
4749                         err = tg3_init_5401phy_dsp(tp);
4750                         if (err)
4751                                 return err;
4752
4753                         tg3_readphy(tp, MII_BMSR, &bmsr);
4754                         for (i = 0; i < 1000; i++) {
4755                                 udelay(10);
4756                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4757                                     (bmsr & BMSR_LSTATUS)) {
4758                                         udelay(40);
4759                                         break;
4760                                 }
4761                         }
4762
4763                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4764                             TG3_PHY_REV_BCM5401_B0 &&
4765                             !(bmsr & BMSR_LSTATUS) &&
4766                             tp->link_config.active_speed == SPEED_1000) {
4767                                 err = tg3_phy_reset(tp);
4768                                 if (!err)
4769                                         err = tg3_init_5401phy_dsp(tp);
4770                                 if (err)
4771                                         return err;
4772                         }
4773                 }
4774         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4775                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4776                 /* 5701 {A0,B0} CRC bug workaround */
4777                 tg3_writephy(tp, 0x15, 0x0a75);
4778                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4779                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4780                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4781         }
4782
4783         /* Clear pending interrupts... */
4784         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4785         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4786
4787         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4788                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4789         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4790                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4791
4792         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4793             tg3_asic_rev(tp) == ASIC_REV_5701) {
4794                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4795                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4796                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4797                 else
4798                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4799         }
4800
4801         current_link_up = false;
4802         current_speed = SPEED_UNKNOWN;
4803         current_duplex = DUPLEX_UNKNOWN;
4804         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4805         tp->link_config.rmt_adv = 0;
4806
4807         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4808                 err = tg3_phy_auxctl_read(tp,
4809                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4810                                           &val);
4811                 if (!err && !(val & (1 << 10))) {
4812                         tg3_phy_auxctl_write(tp,
4813                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4814                                              val | (1 << 10));
4815                         goto relink;
4816                 }
4817         }
4818
4819         bmsr = 0;
4820         for (i = 0; i < 100; i++) {
4821                 tg3_readphy(tp, MII_BMSR, &bmsr);
4822                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4823                     (bmsr & BMSR_LSTATUS))
4824                         break;
4825                 udelay(40);
4826         }
4827
4828         if (bmsr & BMSR_LSTATUS) {
4829                 u32 aux_stat, bmcr;
4830
4831                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4832                 for (i = 0; i < 2000; i++) {
4833                         udelay(10);
4834                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4835                             aux_stat)
4836                                 break;
4837                 }
4838
4839                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4840                                              &current_speed,
4841                                              &current_duplex);
4842
4843                 bmcr = 0;
4844                 for (i = 0; i < 200; i++) {
4845                         tg3_readphy(tp, MII_BMCR, &bmcr);
4846                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4847                                 continue;
4848                         if (bmcr && bmcr != 0x7fff)
4849                                 break;
4850                         udelay(10);
4851                 }
4852
4853                 lcl_adv = 0;
4854                 rmt_adv = 0;
4855
4856                 tp->link_config.active_speed = current_speed;
4857                 tp->link_config.active_duplex = current_duplex;
4858
4859                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4860                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4861
4862                         if ((bmcr & BMCR_ANENABLE) &&
4863                             eee_config_ok &&
4864                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4865                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4866                                 current_link_up = true;
4867
4868                         /* EEE settings changes take effect only after a phy
4869                          * reset.  If we have skipped a reset due to Link Flap
4870                          * Avoidance being enabled, do it now.
4871                          */
4872                         if (!eee_config_ok &&
4873                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4874                             !force_reset) {
4875                                 tg3_setup_eee(tp);
4876                                 tg3_phy_reset(tp);
4877                         }
4878                 } else {
4879                         if (!(bmcr & BMCR_ANENABLE) &&
4880                             tp->link_config.speed == current_speed &&
4881                             tp->link_config.duplex == current_duplex) {
4882                                 current_link_up = true;
4883                         }
4884                 }
4885
4886                 if (current_link_up &&
4887                     tp->link_config.active_duplex == DUPLEX_FULL) {
4888                         u32 reg, bit;
4889
4890                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4891                                 reg = MII_TG3_FET_GEN_STAT;
4892                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4893                         } else {
4894                                 reg = MII_TG3_EXT_STAT;
4895                                 bit = MII_TG3_EXT_STAT_MDIX;
4896                         }
4897
4898                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4899                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4900
4901                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4902                 }
4903         }
4904
4905 relink:
4906         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4907                 tg3_phy_copper_begin(tp);
4908
4909                 if (tg3_flag(tp, ROBOSWITCH)) {
4910                         current_link_up = true;
4911                         /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4912                         current_speed = SPEED_1000;
4913                         current_duplex = DUPLEX_FULL;
4914                         tp->link_config.active_speed = current_speed;
4915                         tp->link_config.active_duplex = current_duplex;
4916                 }
4917
4918                 tg3_readphy(tp, MII_BMSR, &bmsr);
4919                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4920                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4921                         current_link_up = true;
4922         }
4923
4924         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4925         if (current_link_up) {
4926                 if (tp->link_config.active_speed == SPEED_100 ||
4927                     tp->link_config.active_speed == SPEED_10)
4928                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4929                 else
4930                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4931         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4932                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4933         else
4934                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4935
4936         /* In order for the 5750 core in BCM4785 chip to work properly
4937          * in RGMII mode, the Led Control Register must be set up.
4938          */
4939         if (tg3_flag(tp, RGMII_MODE)) {
4940                 u32 led_ctrl = tr32(MAC_LED_CTRL);
4941                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4942
4943                 if (tp->link_config.active_speed == SPEED_10)
4944                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4945                 else if (tp->link_config.active_speed == SPEED_100)
4946                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4947                                      LED_CTRL_100MBPS_ON);
4948                 else if (tp->link_config.active_speed == SPEED_1000)
4949                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4950                                      LED_CTRL_1000MBPS_ON);
4951
4952                 tw32(MAC_LED_CTRL, led_ctrl);
4953                 udelay(40);
4954         }
4955
4956         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4957         if (tp->link_config.active_duplex == DUPLEX_HALF)
4958                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4959
4960         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4961                 if (current_link_up &&
4962                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4963                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4964                 else
4965                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4966         }
4967
4968         /* ??? Without this setting Netgear GA302T PHY does not
4969          * ??? send/receive packets...
4970          */
4971         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4972             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4973                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4974                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4975                 udelay(80);
4976         }
4977
4978         tw32_f(MAC_MODE, tp->mac_mode);
4979         udelay(40);
4980
4981         tg3_phy_eee_adjust(tp, current_link_up);
4982
4983         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4984                 /* Polled via timer. */
4985                 tw32_f(MAC_EVENT, 0);
4986         } else {
4987                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4988         }
4989         udelay(40);
4990
4991         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4992             current_link_up &&
4993             tp->link_config.active_speed == SPEED_1000 &&
4994             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4995                 udelay(120);
4996                 tw32_f(MAC_STATUS,
4997                      (MAC_STATUS_SYNC_CHANGED |
4998                       MAC_STATUS_CFG_CHANGED));
4999                 udelay(40);
5000                 tg3_write_mem(tp,
5001                               NIC_SRAM_FIRMWARE_MBOX,
5002                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5003         }
5004
5005         /* Prevent send BD corruption. */
5006         if (tg3_flag(tp, CLKREQ_BUG)) {
5007                 if (tp->link_config.active_speed == SPEED_100 ||
5008                     tp->link_config.active_speed == SPEED_10)
5009                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5010                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5011                 else
5012                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5013                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5014         }
5015
5016         tg3_test_and_report_link_chg(tp, current_link_up);
5017
5018         return 0;
5019 }
5020
/* Software state for the IEEE 802.3 clause 37 1000BASE-X auto-negotiation
 * arbitration state machine (see tg3_fiber_aneg_smachine()).  Used when
 * fiber autoneg is driven by software instead of the SG_DIG hardware.
 */
struct tg3_fiber_aneginfo {
	int state;		/* current ANEG_STATE_* of the state machine */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	/* Control/status flags; MR_LP_ADV_* bits report what the link
	 * partner advertised in its received config code word.
	 */
	u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Timestamps in state-machine ticks (one tick per invocation). */
	unsigned long link_time, cur_time;

	/* Last config word sampled, and how many consecutive times the
	 * same word was seen (>1 consecutive matches => ability_match).
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	/* Config code words sent to / received from the link partner. */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks to wait for the link to settle before advancing state. */
#define ANEG_STATE_SETTLE_TIME  10000
5084
/* One step of the software 1000BASE-X (clause 37) auto-negotiation
 * arbitration state machine.  Called repeatedly by fiber_autoneg(),
 * roughly once per microsecond, until it returns ANEG_DONE or
 * ANEG_FAILED; ANEG_TIMER_ENAB asks the caller to keep ticking.
 * Link-partner abilities are accumulated into ap->flags.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
                                   struct tg3_fiber_aneginfo *ap)
{
        u16 flowctrl;
        unsigned long delta;
        u32 rx_cfg_reg;
        int ret;

        /* First invocation: zero all sampling state. */
        if (ap->state == ANEG_STATE_UNKNOWN) {
                ap->rxconfig = 0;
                ap->link_time = 0;
                ap->cur_time = 0;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->idle_match = 0;
                ap->ack_match = 0;
        }
        ap->cur_time++;

        /* Sample the received config code word (if any) and update the
         * ability/ack/idle match detectors.  ability_match is set once
         * the same non-idle word has been seen more than once in a row.
         */
        if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
                rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

                if (rx_cfg_reg != ap->ability_match_cfg) {
                        ap->ability_match_cfg = rx_cfg_reg;
                        ap->ability_match = 0;
                        ap->ability_match_count = 0;
                } else {
                        if (++ap->ability_match_count > 1) {
                                ap->ability_match = 1;
                                ap->ability_match_cfg = rx_cfg_reg;
                        }
                }
                if (rx_cfg_reg & ANEG_CFG_ACK)
                        ap->ack_match = 1;
                else
                        ap->ack_match = 0;

                ap->idle_match = 0;
        } else {
                /* No config word being received: link is idle. */
                ap->idle_match = 1;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->ack_match = 0;

                rx_cfg_reg = 0;
        }

        ap->rxconfig = rx_cfg_reg;
        ret = ANEG_OK;

        switch (ap->state) {
        case ANEG_STATE_UNKNOWN:
                if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
                        ap->state = ANEG_STATE_AN_ENABLE;

                /* fallthru */
        case ANEG_STATE_AN_ENABLE:
                ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
                if (ap->flags & MR_AN_ENABLE) {
                        ap->link_time = 0;
                        ap->cur_time = 0;
                        ap->ability_match_cfg = 0;
                        ap->ability_match_count = 0;
                        ap->ability_match = 0;
                        ap->idle_match = 0;
                        ap->ack_match = 0;

                        ap->state = ANEG_STATE_RESTART_INIT;
                } else {
                        ap->state = ANEG_STATE_DISABLE_LINK_OK;
                }
                break;

        case ANEG_STATE_RESTART_INIT:
                /* Transmit an all-zero config word while restarting. */
                ap->link_time = ap->cur_time;
                ap->flags &= ~(MR_NP_LOADED);
                ap->txconfig = 0;
                tw32(MAC_TX_AUTO_NEG, 0);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ret = ANEG_TIMER_ENAB;
                ap->state = ANEG_STATE_RESTART;

                /* fallthru */
        case ANEG_STATE_RESTART:
                /* Hold in restart until the settle time has elapsed. */
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME)
                        ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
                else
                        ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_DISABLE_LINK_OK:
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_ABILITY_DETECT_INIT:
                /* Begin advertising our abilities: full duplex plus
                 * whatever pause bits flow control config requests.
                 */
                ap->flags &= ~(MR_TOGGLE_TX);
                ap->txconfig = ANEG_CFG_FD;
                flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
                if (flowctrl & ADVERTISE_1000XPAUSE)
                        ap->txconfig |= ANEG_CFG_PS1;
                if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                        ap->txconfig |= ANEG_CFG_PS2;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ABILITY_DETECT;
                break;

        case ANEG_STATE_ABILITY_DETECT:
                /* Wait until the partner's ability word is stable. */
                if (ap->ability_match != 0 && ap->rxconfig != 0)
                        ap->state = ANEG_STATE_ACK_DETECT_INIT;
                break;

        case ANEG_STATE_ACK_DETECT_INIT:
                /* Acknowledge the partner's ability word. */
                ap->txconfig |= ANEG_CFG_ACK;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ACK_DETECT;

                /* fallthru */
        case ANEG_STATE_ACK_DETECT:
                if (ap->ack_match != 0) {
                        /* Ability word (sans ACK) must still match what
                         * we acknowledged; otherwise renegotiate.
                         */
                        if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
                            (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
                                ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
                        } else {
                                ap->state = ANEG_STATE_AN_ENABLE;
                        }
                } else if (ap->ability_match != 0 &&
                           ap->rxconfig == 0) {
                        /* Partner dropped back to idle: restart. */
                        ap->state = ANEG_STATE_AN_ENABLE;
                }
                break;

        case ANEG_STATE_COMPLETE_ACK_INIT:
                /* Reject config words with reserved bits set. */
                if (ap->rxconfig & ANEG_CFG_INVAL) {
                        ret = ANEG_FAILED;
                        break;
                }
                /* Latch the link partner's advertised abilities. */
                ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
                               MR_LP_ADV_HALF_DUPLEX |
                               MR_LP_ADV_SYM_PAUSE |
                               MR_LP_ADV_ASYM_PAUSE |
                               MR_LP_ADV_REMOTE_FAULT1 |
                               MR_LP_ADV_REMOTE_FAULT2 |
                               MR_LP_ADV_NEXT_PAGE |
                               MR_TOGGLE_RX |
                               MR_NP_RX);
                if (ap->rxconfig & ANEG_CFG_FD)
                        ap->flags |= MR_LP_ADV_FULL_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_HD)
                        ap->flags |= MR_LP_ADV_HALF_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_PS1)
                        ap->flags |= MR_LP_ADV_SYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_PS2)
                        ap->flags |= MR_LP_ADV_ASYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_RF1)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
                if (ap->rxconfig & ANEG_CFG_RF2)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_LP_ADV_NEXT_PAGE;

                ap->link_time = ap->cur_time;

                ap->flags ^= (MR_TOGGLE_TX);
                /* 0x0008: magic bit inherited from the vendor driver --
                 * presumably the received toggle bit of the next-page
                 * protocol (NOTE: not covered by the ANEG_CFG_* names).
                 */
                if (ap->rxconfig & 0x0008)
                        ap->flags |= MR_TOGGLE_RX;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_NP_RX;
                ap->flags |= MR_PAGE_RX;

                ap->state = ANEG_STATE_COMPLETE_ACK;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_COMPLETE_ACK:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
                                ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                        } else {
                                /* Next-page exchange is unimplemented;
                                 * fail unless neither side wants one.
                                 */
                                if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
                                    !(ap->flags & MR_NP_RX)) {
                                        ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                                } else {
                                        ret = ANEG_FAILED;
                                }
                        }
                }
                break;

        case ANEG_STATE_IDLE_DETECT_INIT:
                /* Stop sending config words and wait for idle. */
                ap->link_time = ap->cur_time;
                tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_IDLE_DETECT;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_IDLE_DETECT:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        /* XXX another gem from the Broadcom driver :( */
                        ap->state = ANEG_STATE_LINK_OK;
                }
                break;

        case ANEG_STATE_LINK_OK:
                ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
                /* ??? unimplemented */
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT:
                /* ??? unimplemented */
                break;

        default:
                ret = ANEG_FAILED;
                break;
        }

        return ret;
}
5336
5337 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5338 {
5339         int res = 0;
5340         struct tg3_fiber_aneginfo aninfo;
5341         int status = ANEG_FAILED;
5342         unsigned int tick;
5343         u32 tmp;
5344
5345         tw32_f(MAC_TX_AUTO_NEG, 0);
5346
5347         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5348         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5349         udelay(40);
5350
5351         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5352         udelay(40);
5353
5354         memset(&aninfo, 0, sizeof(aninfo));
5355         aninfo.flags |= MR_AN_ENABLE;
5356         aninfo.state = ANEG_STATE_UNKNOWN;
5357         aninfo.cur_time = 0;
5358         tick = 0;
5359         while (++tick < 195000) {
5360                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5361                 if (status == ANEG_DONE || status == ANEG_FAILED)
5362                         break;
5363
5364                 udelay(1);
5365         }
5366
5367         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5368         tw32_f(MAC_MODE, tp->mac_mode);
5369         udelay(40);
5370
5371         *txflags = aninfo.txconfig;
5372         *rxflags = aninfo.flags;
5373
5374         if (status == ANEG_DONE &&
5375             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5376                              MR_LP_ADV_FULL_DUPLEX)))
5377                 res = 1;
5378
5379         return res;
5380 }
5381
5382 static void tg3_init_bcm8002(struct tg3 *tp)
5383 {
5384         u32 mac_status = tr32(MAC_STATUS);
5385         int i;
5386
5387         /* Reset when initting first time or we have a link. */
5388         if (tg3_flag(tp, INIT_COMPLETE) &&
5389             !(mac_status & MAC_STATUS_PCS_SYNCED))
5390                 return;
5391
5392         /* Set PLL lock range. */
5393         tg3_writephy(tp, 0x16, 0x8007);
5394
5395         /* SW reset */
5396         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5397
5398         /* Wait for reset to complete. */
5399         /* XXX schedule_timeout() ... */
5400         for (i = 0; i < 500; i++)
5401                 udelay(10);
5402
5403         /* Config mode; select PMA/Ch 1 regs. */
5404         tg3_writephy(tp, 0x10, 0x8411);
5405
5406         /* Enable auto-lock and comdet, select txclk for tx. */
5407         tg3_writephy(tp, 0x11, 0x0a10);
5408
5409         tg3_writephy(tp, 0x18, 0x00a0);
5410         tg3_writephy(tp, 0x16, 0x41ff);
5411
5412         /* Assert and deassert POR. */
5413         tg3_writephy(tp, 0x13, 0x0400);
5414         udelay(40);
5415         tg3_writephy(tp, 0x13, 0x0000);
5416
5417         tg3_writephy(tp, 0x11, 0x0a50);
5418         udelay(40);
5419         tg3_writephy(tp, 0x11, 0x0a10);
5420
5421         /* Wait for signal to stabilize */
5422         /* XXX schedule_timeout() ... */
5423         for (i = 0; i < 15000; i++)
5424                 udelay(10);
5425
5426         /* Deselect the channel register so we can read the PHYID
5427          * later.
5428          */
5429         tg3_writephy(tp, 0x10, 0x8011);
5430 }
5431
/* Configure fiber link using the on-chip SG_DIG hardware autoneg block.
 * @mac_status: snapshot of the MAC_STATUS register taken by the caller.
 * Returns true if the link is up after this pass.  Also implements the
 * parallel-detection fallback (link partner not doing autoneg) and a
 * SerDes config workaround for chips other than 5704 A0/A1.
 */
static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
        u16 flowctrl;
        bool current_link_up;
        u32 sg_dig_ctrl, sg_dig_status;
        u32 serdes_cfg, expected_sg_dig_ctrl;
        int workaround, port_a;

        serdes_cfg = 0;
        expected_sg_dig_ctrl = 0;
        workaround = 0;
        port_a = 1;
        current_link_up = false;

        /* All revs except 5704 A0/A1 need the SerDes config workaround;
         * figure out which MAC port we are for the pre-emphasis value.
         */
        if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
            tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
                workaround = 1;
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        port_a = 0;

                /* preserve bits 0-11,13,14 for signal pre-emphasis */
                /* preserve bits 20-23 for voltage regulator */
                serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
        }

        sg_dig_ctrl = tr32(SG_DIG_CTRL);

        if (tp->link_config.autoneg != AUTONEG_ENABLE) {
                /* Forced mode: tear down hardware autoneg if it was on,
                 * then report link based solely on PCS sync.
                 */
                if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
                        if (workaround) {
                                u32 val = serdes_cfg;

                                if (port_a)
                                        val |= 0xc010000;
                                else
                                        val |= 0x4010000;
                                tw32_f(MAC_SERDES_CFG, val);
                        }

                        tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                }
                if (mac_status & MAC_STATUS_PCS_SYNCED) {
                        tg3_setup_flow_control(tp, 0, 0);
                        current_link_up = true;
                }
                goto out;
        }

        /* Want auto-negotiation.  */
        expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

        /* Fold the requested flow control into the advertisement. */
        flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
        if (flowctrl & ADVERTISE_1000XPAUSE)
                expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
        if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

        if (sg_dig_ctrl != expected_sg_dig_ctrl) {
                /* If we are in parallel-detect mode and still synced
                 * with no config words arriving, keep the link up while
                 * the detection timeout counts down.
                 */
                if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
                    tp->serdes_counter &&
                    ((mac_status & (MAC_STATUS_PCS_SYNCED |
                                    MAC_STATUS_RCVD_CFG)) ==
                     MAC_STATUS_PCS_SYNCED)) {
                        tp->serdes_counter--;
                        current_link_up = true;
                        goto out;
                }
restart_autoneg:
                /* (Re)start hardware autoneg via SG_DIG soft reset. */
                if (workaround)
                        tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
                udelay(5);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
        } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
                                 MAC_STATUS_SIGNAL_DET)) {
                sg_dig_status = tr32(SG_DIG_STATUS);
                mac_status = tr32(MAC_STATUS);

                if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
                    (mac_status & MAC_STATUS_PCS_SYNCED)) {
                        /* Autoneg finished: resolve flow control from
                         * local and partner pause advertisements.
                         */
                        u32 local_adv = 0, remote_adv = 0;

                        if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tp->link_config.rmt_adv =
                                           mii_adv_to_ethtool_adv_x(remote_adv);

                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = true;
                        tp->serdes_counter = 0;
                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
                } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
                        if (tp->serdes_counter)
                                tp->serdes_counter--;
                        else {
                                /* Autoneg timed out: drop to parallel
                                 * detection by disabling HW autoneg.
                                 */
                                if (workaround) {
                                        u32 val = serdes_cfg;

                                        if (port_a)
                                                val |= 0xc010000;
                                        else
                                                val |= 0x4010000;

                                        tw32_f(MAC_SERDES_CFG, val);
                                }

                                tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                                udelay(40);

                                /* Link parallel detection - link is up */
                                /* only if we have PCS_SYNC and not */
                                /* receiving config code words */
                                mac_status = tr32(MAC_STATUS);
                                if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
                                    !(mac_status & MAC_STATUS_RCVD_CFG)) {
                                        tg3_setup_flow_control(tp, 0, 0);
                                        current_link_up = true;
                                        tp->phy_flags |=
                                                TG3_PHYFLG_PARALLEL_DETECT;
                                        tp->serdes_counter =
                                                SERDES_PARALLEL_DET_TIMEOUT;
                                } else
                                        goto restart_autoneg;
                        }
                }
        } else {
                /* No sync and no signal: rearm the autoneg timeout. */
                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
        }

out:
        return current_link_up;
}
5576
/* Configure fiber link with software (clause 37) autoneg, or force a
 * 1000FD link when autoneg is disabled.
 * @mac_status: snapshot of the MAC_STATUS register taken by the caller.
 * Returns true if the link is up after this pass.
 */
static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
        bool current_link_up = false;

        /* Without PCS sync there is no link to negotiate over. */
        if (!(mac_status & MAC_STATUS_PCS_SYNCED))
                goto out;

        if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 txflags, rxflags;
                int i;

                if (fiber_autoneg(tp, &txflags, &rxflags)) {
                        /* Autoneg succeeded: resolve flow control from
                         * what we advertised and what the partner sent.
                         */
                        u32 local_adv = 0, remote_adv = 0;

                        if (txflags & ANEG_CFG_PS1)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (txflags & ANEG_CFG_PS2)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (rxflags & MR_LP_ADV_SYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (rxflags & MR_LP_ADV_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tp->link_config.rmt_adv =
                                           mii_adv_to_ethtool_adv_x(remote_adv);

                        tg3_setup_flow_control(tp, local_adv, remote_adv);

                        current_link_up = true;
                }
                /* Clear latched sync/config-change status; retry a few
                 * times in case the bits re-latch immediately.
                 */
                for (i = 0; i < 30; i++) {
                        udelay(20);
                        tw32_f(MAC_STATUS,
                               (MAC_STATUS_SYNC_CHANGED |
                                MAC_STATUS_CFG_CHANGED));
                        udelay(40);
                        if ((tr32(MAC_STATUS) &
                             (MAC_STATUS_SYNC_CHANGED |
                              MAC_STATUS_CFG_CHANGED)) == 0)
                                break;
                }

                /* Even if autoneg failed, accept the link when we are
                 * synced and no config words are being received
                 * (partner is not negotiating).
                 */
                mac_status = tr32(MAC_STATUS);
                if (!current_link_up &&
                    (mac_status & MAC_STATUS_PCS_SYNCED) &&
                    !(mac_status & MAC_STATUS_RCVD_CFG))
                        current_link_up = true;
        } else {
                tg3_setup_flow_control(tp, 0, 0);

                /* Forcing 1000FD link up. */
                current_link_up = true;

                /* Pulse SEND_CONFIGS once, then restore the MAC mode. */
                tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
                udelay(40);

                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

out:
        return current_link_up;
}
5641
/* Link setup for TBI-mode (1000BASE-X) fiber ports.  Re-checks an
 * existing link, otherwise reruns autoneg either in hardware
 * (HW_AUTONEG) or by hand, then programs MAC state and LEDs to match.
 * Always returns 0; link changes are reported via tg3_link_report().
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	bool current_link_up;
	int i;

	/* Snapshot current link parameters so we only emit a link report
	 * at the end if something actually changed.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: link already up and stable (PCS synced, signal
	 * detected, no config change pending) -- just ack the change
	 * bits and leave the link alone.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC port into TBI mode before (re)negotiating. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = false;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Mark the status block updated but drop any stale link-change
	 * indication left over from the renegotiation above.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack the status-change bits until they stay clear (bounded). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = false;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Pulse SEND_CONFIGS to kick the link partner
			 * into renegotiating.
			 */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up) {
		/* TBI fiber links are always 1000 Mb/s full duplex. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Even if the up/down state is unchanged, report when speed,
	 * duplex or flow control differs from the snapshot taken above.
	 */
	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5744
5745 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5746 {
5747         int err = 0;
5748         u32 bmsr, bmcr;
5749         u16 current_speed = SPEED_UNKNOWN;
5750         u8 current_duplex = DUPLEX_UNKNOWN;
5751         bool current_link_up = false;
5752         u32 local_adv, remote_adv, sgsr;
5753
5754         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5755              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5756              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5757              (sgsr & SERDES_TG3_SGMII_MODE)) {
5758
5759                 if (force_reset)
5760                         tg3_phy_reset(tp);
5761
5762                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5763
5764                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5765                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5766                 } else {
5767                         current_link_up = true;
5768                         if (sgsr & SERDES_TG3_SPEED_1000) {
5769                                 current_speed = SPEED_1000;
5770                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5771                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5772                                 current_speed = SPEED_100;
5773                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5774                         } else {
5775                                 current_speed = SPEED_10;
5776                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5777                         }
5778
5779                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5780                                 current_duplex = DUPLEX_FULL;
5781                         else
5782                                 current_duplex = DUPLEX_HALF;
5783                 }
5784
5785                 tw32_f(MAC_MODE, tp->mac_mode);
5786                 udelay(40);
5787
5788                 tg3_clear_mac_status(tp);
5789
5790                 goto fiber_setup_done;
5791         }
5792
5793         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5794         tw32_f(MAC_MODE, tp->mac_mode);
5795         udelay(40);
5796
5797         tg3_clear_mac_status(tp);
5798
5799         if (force_reset)
5800                 tg3_phy_reset(tp);
5801
5802         tp->link_config.rmt_adv = 0;
5803
5804         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5805         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5806         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5807                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5808                         bmsr |= BMSR_LSTATUS;
5809                 else
5810                         bmsr &= ~BMSR_LSTATUS;
5811         }
5812
5813         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5814
5815         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5816             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5817                 /* do nothing, just check for link up at the end */
5818         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5819                 u32 adv, newadv;
5820
5821                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5822                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5823                                  ADVERTISE_1000XPAUSE |
5824                                  ADVERTISE_1000XPSE_ASYM |
5825                                  ADVERTISE_SLCT);
5826
5827                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5828                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5829
5830                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5831                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5832                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5833                         tg3_writephy(tp, MII_BMCR, bmcr);
5834
5835                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5836                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5837                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5838
5839                         return err;
5840                 }
5841         } else {
5842                 u32 new_bmcr;
5843
5844                 bmcr &= ~BMCR_SPEED1000;
5845                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5846
5847                 if (tp->link_config.duplex == DUPLEX_FULL)
5848                         new_bmcr |= BMCR_FULLDPLX;
5849
5850                 if (new_bmcr != bmcr) {
5851                         /* BMCR_SPEED1000 is a reserved bit that needs
5852                          * to be set on write.
5853                          */
5854                         new_bmcr |= BMCR_SPEED1000;
5855
5856                         /* Force a linkdown */
5857                         if (tp->link_up) {
5858                                 u32 adv;
5859
5860                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5861                                 adv &= ~(ADVERTISE_1000XFULL |
5862                                          ADVERTISE_1000XHALF |
5863                                          ADVERTISE_SLCT);
5864                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5865                                 tg3_writephy(tp, MII_BMCR, bmcr |
5866                                                            BMCR_ANRESTART |
5867                                                            BMCR_ANENABLE);
5868                                 udelay(10);
5869                                 tg3_carrier_off(tp);
5870                         }
5871                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5872                         bmcr = new_bmcr;
5873                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5874                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5875                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5876                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5877                                         bmsr |= BMSR_LSTATUS;
5878                                 else
5879                                         bmsr &= ~BMSR_LSTATUS;
5880                         }
5881                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5882                 }
5883         }
5884
5885         if (bmsr & BMSR_LSTATUS) {
5886                 current_speed = SPEED_1000;
5887                 current_link_up = true;
5888                 if (bmcr & BMCR_FULLDPLX)
5889                         current_duplex = DUPLEX_FULL;
5890                 else
5891                         current_duplex = DUPLEX_HALF;
5892
5893                 local_adv = 0;
5894                 remote_adv = 0;
5895
5896                 if (bmcr & BMCR_ANENABLE) {
5897                         u32 common;
5898
5899                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5900                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5901                         common = local_adv & remote_adv;
5902                         if (common & (ADVERTISE_1000XHALF |
5903                                       ADVERTISE_1000XFULL)) {
5904                                 if (common & ADVERTISE_1000XFULL)
5905                                         current_duplex = DUPLEX_FULL;
5906                                 else
5907                                         current_duplex = DUPLEX_HALF;
5908
5909                                 tp->link_config.rmt_adv =
5910                                            mii_adv_to_ethtool_adv_x(remote_adv);
5911                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5912                                 /* Link is up via parallel detect */
5913                         } else {
5914                                 current_link_up = false;
5915                         }
5916                 }
5917         }
5918
5919 fiber_setup_done:
5920         if (current_link_up && current_duplex == DUPLEX_FULL)
5921                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5922
5923         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5924         if (tp->link_config.active_duplex == DUPLEX_HALF)
5925                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5926
5927         tw32_f(MAC_MODE, tp->mac_mode);
5928         udelay(40);
5929
5930         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5931
5932         tp->link_config.active_speed = current_speed;
5933         tp->link_config.active_duplex = current_duplex;
5934
5935         tg3_test_and_report_link_chg(tp, current_link_up);
5936         return err;
5937 }
5938
/* Periodic SerDes parallel-detect helper.  While autoneg is running it
 * just counts down; afterwards, if the link is still down it forces a
 * 1000FD link when signal is detected without config code words, and if
 * a parallel-detected link later starts receiving config code words it
 * switches autoneg back on.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice; NOTE(review): presumably the first
			 * read clears latched bits -- confirm against the
			 * PHY datasheet.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5998
/* Top-level PHY/link setup: dispatch to the fiber, fiber-MII or copper
 * handler based on the PHY flags, then apply post-link fixups (clock
 * prescaler, TX slot time, stats coalescing, ASPM threshold).  Returns
 * the error code of the chosen handler.
 */
static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		/* 5784 A-step: re-derive the GRC timer prescaler from
		 * the current MAC clock speed.
		 */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		/* Preserve the jumbo-frame and countdown fields on these
		 * chips when rewriting MAC_TX_LENGTHS.
		 */
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Gigabit half duplex needs a longer slot time (0xff vs 32). */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	/* Only run the statistics coalescing timer while the link is up. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	/* ASPM workaround: tighten/relax the PCIe L1 entry threshold
	 * depending on link state.
	 */
	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
6064
6065 /* tp->lock must be held */
6066 static u64 tg3_refclk_read(struct tg3 *tp)
6067 {
6068         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6069         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6070 }
6071
/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	/* Stop the clock, load both counter halves, then resume (the
	 * final write is flushed with tw32_f).
	 */
	tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
}
6080
6081 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6082 static inline void tg3_full_unlock(struct tg3 *tp);
6083 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6084 {
6085         struct tg3 *tp = netdev_priv(dev);
6086
6087         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6088                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6089                                 SOF_TIMESTAMPING_SOFTWARE;
6090
6091         if (tg3_flag(tp, PTP_CAPABLE)) {
6092                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6093                                         SOF_TIMESTAMPING_RX_HARDWARE |
6094                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6095         }
6096
6097         if (tp->ptp_clock)
6098                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6099         else
6100                 info->phc_index = -1;
6101
6102         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6103
6104         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6105                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6106                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6107                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6108         return 0;
6109 }
6110
/* PTP ->adjfreq() callback: program the EAV reference clock frequency
 * correction.  @ppb is parts per billion; negative values slow the
 * clock down.  Always returns 0.
 */
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	/* Work with the magnitude; the sign becomes the NEG flag below. */
	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *              ppb * (1 << 24) / 1000000000
	 */
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	/* A zero correction disables the correction logic entirely. */
	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}
6146
/* PTP ->adjtime() callback: apply @delta (ns) as a software offset in
 * tp->ptp_adjust rather than touching the hardware counter.  The offset
 * is added back in tg3_ptp_gettime() and timestamp conversion.
 */
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}
6157
/* PTP ->gettime() callback: hardware counter plus the software offset
 * from tg3_ptp_adjtime(), split into seconds/nanoseconds.
 */
static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
	u64 ns;
	u32 remainder;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	ns = tg3_refclk_read(tp);
	ns += tp->ptp_adjust;
	tg3_full_unlock(tp);

	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
	ts->tv_nsec = remainder;

	return 0;
}
6174
/* PTP ->settime() callback: load the hardware counter directly and
 * clear the software offset so gettime() reflects exactly @ts.
 */
static int tg3_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	ns = timespec_to_ns(ts);

	tg3_full_lock(tp, 0);
	tg3_refclk_write(tp, ns);
	tp->ptp_adjust = 0;
	tg3_full_unlock(tp);

	return 0;
}
6190
/* PTP ->enable() callback: no ancillary features (alarms, external
 * timestamps, periodic outputs, PPS) are supported on this hardware.
 */
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}
6196
/* PTP clock capabilities and callbacks; copied into tp->ptp_info by
 * tg3_ptp_init().  No ancillary pins/alarms are exposed.
 */
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime	= tg3_ptp_gettime,
	.settime	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
6211
/* Convert a raw hardware timestamp counter value into the
 * skb_shared_hwtstamps form, applying the software clock offset.
 * The memset also clears any padding in the output struct.
 */
static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
				     struct skb_shared_hwtstamps *timestamp)
{
	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
					   tp->ptp_adjust);
}
6219
/* tp->lock must be held.
 * Seed the hardware clock and the PTP callback table.  The actual
 * ptp_clock_register() happens elsewhere; this only prepares state.
 */
static void tg3_ptp_init(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_adjust = 0;
	tp->ptp_info = tg3_ptp_caps;
}
6231
/* tp->lock must be held.
 * After resume, fold the accumulated software offset into the hardware
 * counter (re-seeded from system time) and reset the offset.
 */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}
6241
/* Unregister the PTP clock (if one was registered) and reset the
 * related state.  Safe to call when PTP was never set up.
 */
static void tg3_ptp_fini(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
		return;

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
	tp->ptp_adjust = 0;
}
6251
/* Return tp->irq_sync.  NOTE(review): presumably nonzero while irqs are
 * being quiesced so pollers back off -- confirm against the setters.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
6256
/* Read @len bytes of registers starting at @off into the dump buffer
 * @dst.  The buffer pointer is first advanced by @off bytes so each
 * register value lands at the buffer position matching its register
 * offset (callers pass the start of the whole dump buffer).
 */
static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	int i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
6265
/* Dump the register blocks of non-PCI-Express chips into @regs for
 * tg3_dump_state().  Each (base, length) pair below mirrors a hardware
 * register group; tg3_rd32_loop() places values at matching offsets in
 * the buffer.  Do not reorder -- the list tracks the register map.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers only exist with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* Older chips have a separate TX CPU. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
6315
/* Log a snapshot of the chip registers plus per-vector status block and
 * NAPI state for debugging.  Uses GFP_ATOMIC since it can be called
 * from non-sleepable error paths; silently returns on allocation
 * failure (best effort).
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four words per line, skipping all-zero groups to keep
	 * the log readable.  i is a word index, so byte offset is i*4.
	 */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
6371
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If either condition holds, the reorder workaround is already
	 * active and this recovery path should be unreachable.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Actual reset is deferred to the workqueue. */
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
}
6391
6392 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6393 {
6394         /* Tell compiler to fetch tx indices from memory. */
6395         barrier();
6396         return tnapi->tx_pending -
6397                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6398 }
6399
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 *
 * Reclaim TX descriptors that the hardware has finished with: unmap
 * their DMA buffers, free the skbs, deliver TX hardware timestamps,
 * and wake the netdev queue if it was stopped for lack of space.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	/* Everything between our software consumer index and the
	 * hardware's consumer index (from the status block) is done.
	 */
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* With TSS the napi index is one greater than the TX queue
	 * index, so shift down by one to find our queue.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot without an skb means the rings are
		 * inconsistent; schedule a chip reset.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Read out and deliver the TX hardware timestamp if one
		 * was requested for this descriptor.
		 */
		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		/* Unmap the linear (head) portion of the skb. */
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip over any extra descriptors this buffer was split
		 * across (marked ri->fragmented at xmit time).
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap every paged fragment of the skb. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* Fragment slots must be skb-less and must not
			 * run past the hardware index; otherwise the
			 * rings are corrupted.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	/* Report completions for byte queue limits (BQL). */
	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		/* Re-check under the tx lock to close the race with the
		 * transmit path stopping the queue concurrently.
		 */
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
6503
6504 static void tg3_frag_free(bool is_frag, void *data)
6505 {
6506         if (is_frag)
6507                 put_page(virt_to_head_page(data));
6508         else
6509                 kfree(data);
6510 }
6511
6512 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6513 {
6514         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6515                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6516
6517         if (!ri->data)
6518                 return;
6519
6520         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6521                          map_sz, PCI_DMA_FROMDEVICE);
6522         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6523         ri->data = NULL;
6524 }
6525
6526
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	/* Pick descriptor, ring_info slot and DMA size for the target
	 * producer ring (standard or jumbo).
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	/* Total allocation includes the RX headroom plus room for
	 * skb_shared_info so the buffer can later go to build_skb().
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		/* Page-frag backed; a non-zero *frag_size marks it as
		 * such for tg3_frag_free()/build_skb().
		 */
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		/* Mapping failed: release the buffer, leave ring state
		 * untouched per the contract above.
		 */
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	/* Commit: record the buffer and DMA address, then publish the
	 * address to the hardware descriptor.
	 */
	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
6602
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 *
 * Re-post an already-allocated buffer (at @src_idx in napi[0]'s
 * producer ring) into @dpr at @dest_idx_unmasked, instead of
 * allocating a fresh one.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Received buffers always originate from napi[0]'s prodring. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	/* Copy buffer pointer, unmap cookie and DMA address over to
	 * the destination slot.
	 */
	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
6652
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 *
 * Process up to @budget received packets for @tnapi; returns the
 * number of packets delivered to the stack.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		/* The opaque cookie identifies which producer ring
		 * (std or jumbo) the buffer came from and its index.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Errored frame: hand the buffer straight back to the
		 * producer ring and count the drop.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		/* Hardware length includes the FCS; strip it. */
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		/* Latch the RX hardware timestamp for PTP v1/v2 frames. */
		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			/* Large packet: post a replacement buffer, then
			 * hand the received buffer to the stack without
			 * copying via build_skb().
			 */
			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						    *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			/* Small packet: copy the data into a fresh skb
			 * and return the original buffer to the chip.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		/* Trust the hardware checksum only when the device has
		 * RXCSUM enabled and the chip reports a good TCP/UDP sum.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames unless they carry a VLAN tag. */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Don't let the chip starve for std buffers: flush a
		 * batch of postings early once the threshold is hit.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* In RSS mode napi[1] publishes buffers to the hardware
		 * ring; ask it to run unless we are napi[1] ourselves.
		 */
		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
6882
/* React to a link-change indication in the status block: with phylib
 * the MAC attention bits are just acked; otherwise the native link
 * setup code is re-run.  Skipped entirely when link changes are
 * detected via register polling instead of the status block.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit while preserving the
			 * other status bits.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* phylib owns link management; just ack the
				 * MAC attention bits.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, false);
			spin_unlock(&tp->lock);
		}
	}
}
6906
/* Transfer replenished RX buffers from a per-queue producer ring
 * (@spr) into the destination ring (@dpr) that the chip is served
 * from — first the standard ring, then the jumbo ring.  Returns 0,
 * or -ENOSPC if a destination slot was still occupied and the copy
 * had to stop short.
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Number of contiguous entries available, accounting for
		 * wrap-around of the source ring.
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		/* Also bounded by contiguous space in the destination. */
		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Shrink the copy if a destination slot is still in use. */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		/* Carry the DMA addresses over to the destination BDs. */
		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Same procedure for the jumbo ring. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
7032
/* Service one NAPI context: TX completions first, then RX up to the
 * remaining budget.  In RSS mode napi[1] additionally gathers the
 * buffers replenished by all RX queues into napi[0]'s producer ring
 * and publishes the new producer indices to the chip.  Returns the
 * updated work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		/* tg3_tx() found corrupted rings; bail and let the
		 * reset task take over.
		 */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* This vector has no RX return ring to service. */
	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		/* Pull replenished buffers from every RX queue's
		 * producer ring into the ring the chip consumes from.
		 */
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* BD contents must be visible before the mailbox writes
		 * tell the chip about the new producer indices.
		 */
		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A transfer came up short (-ENOSPC); poke the
		 * coalescing engine so we get called again soon —
		 * NOTE(review): presumed intent of this HOSTCC write,
		 * confirm against the register spec.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
7083
7084 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7085 {
7086         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7087                 schedule_work(&tp->reset_task);
7088 }
7089
/* Cancel any scheduled reset task — waiting for one already running
 * to finish — then clear the flags it would have acted on.
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
7096
/* NAPI poll handler used for MSI-X vectors (tagged status blocks).
 * Loops doing work until either the budget is exhausted or no RX/TX
 * work remains, then re-enables the vector's interrupt.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				/* A refill request slipped in between the
				 * check above and napi_complete(); force
				 * the coalescing engine so this vector
				 * gets polled again.
				 */
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
7156
7157 static void tg3_process_error(struct tg3 *tp)
7158 {
7159         u32 val;
7160         bool real_error = false;
7161
7162         if (tg3_flag(tp, ERROR_PROCESSED))
7163                 return;
7164
7165         /* Check Flow Attention register */
7166         val = tr32(HOSTCC_FLOW_ATTN);
7167         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7168                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7169                 real_error = true;
7170         }
7171
7172         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7173                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7174                 real_error = true;
7175         }
7176
7177         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7178                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7179                 real_error = true;
7180         }
7181
7182         if (!real_error)
7183                 return;
7184
7185         tg3_dump_state(tp);
7186
7187         tg3_flag_set(tp, ERROR_PROCESSED);
7188         tg3_reset_task_schedule(tp);
7189 }
7190
/* NAPI poll handler for the default (INTx/MSI) vector.  Handles error
 * status, link events and RX/TX completions until either the budget is
 * exhausted or no work remains, then re-enables interrupts.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		/* TX error recovery takes precedence over further polling. */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
7238
7239 static void tg3_napi_disable(struct tg3 *tp)
7240 {
7241         int i;
7242
7243         for (i = tp->irq_cnt - 1; i >= 0; i--)
7244                 napi_disable(&tp->napi[i].napi);
7245 }
7246
7247 static void tg3_napi_enable(struct tg3 *tp)
7248 {
7249         int i;
7250
7251         for (i = 0; i < tp->irq_cnt; i++)
7252                 napi_enable(&tp->napi[i].napi);
7253 }
7254
7255 static void tg3_napi_init(struct tg3 *tp)
7256 {
7257         int i;
7258
7259         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7260         for (i = 1; i < tp->irq_cnt; i++)
7261                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7262 }
7263
7264 static void tg3_napi_fini(struct tg3 *tp)
7265 {
7266         int i;
7267
7268         for (i = 0; i < tp->irq_cnt; i++)
7269                 netif_napi_del(&tp->napi[i].napi);
7270 }
7271
/* Quiesce the netdev TX path: refresh trans_start first so the TX
 * watchdog does not fire while the device is stopped, then shut down
 * NAPI, carrier and the TX queues.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}
7279
/* Restart the netdev TX path after reset/reconfiguration.
 * tp->lock must be held.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	/* Mark the status block updated so the next poll sees any
	 * events that arrived while the device was stopped.
	 */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
7298
/* Block new interrupt work (handlers bail out when tg3_irq_sync() is
 * set) and wait until every vector's in-flight handler has finished.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Publish irq_sync before waiting on the handlers. */
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
7311
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		/* Also wait out any in-flight interrupt handlers. */
		tg3_irq_quiesce(tp);
}
7323
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
7328
7329 /* One-shot MSI handler - Chip automatically disables interrupt
7330  * after sending MSI so driver doesn't have to do it.
7331  */
7332 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7333 {
7334         struct tg3_napi *tnapi = dev_id;
7335         struct tg3 *tp = tnapi->tp;
7336
7337         prefetch(tnapi->hw_status);
7338         if (tnapi->rx_rcb)
7339                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7340
7341         if (likely(!tg3_irq_sync(tp)))
7342                 napi_schedule(&tnapi->napi);
7343
7344         return IRQ_HANDLED;
7345 }
7346
7347 /* MSI ISR - No need to check for interrupt sharing and no need to
7348  * flush status block and interrupt mailbox. PCI ordering rules
7349  * guarantee that MSI will arrive after the status block.
7350  */
7351 static irqreturn_t tg3_msi(int irq, void *dev_id)
7352 {
7353         struct tg3_napi *tnapi = dev_id;
7354         struct tg3 *tp = tnapi->tp;
7355
7356         prefetch(tnapi->hw_status);
7357         if (tnapi->rx_rcb)
7358                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7359         /*
7360          * Writing any value to intr-mbox-0 clears PCI INTA# and
7361          * chip-internal interrupt pending events.
7362          * Writing non-zero to intr-mbox-0 additional tells the
7363          * NIC to stop sending us irqs, engaging "in-intr-handler"
7364          * event coalescing.
7365          */
7366         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7367         if (likely(!tg3_irq_sync(tp)))
7368                 napi_schedule(&tnapi->napi);
7369
7370         return IRQ_RETVAL(1);
7371 }
7372
/* INTx interrupt handler for chips that do not use tagged status. */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
7421
/* INTx interrupt handler for chips that use tagged status blocks. */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
7473
7474 /* ISR for interrupt test */
7475 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7476 {
7477         struct tg3_napi *tnapi = dev_id;
7478         struct tg3 *tp = tnapi->tp;
7479         struct tg3_hw_status *sblk = tnapi->hw_status;
7480
7481         if ((sblk->status & SD_STATUS_UPDATED) ||
7482             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7483                 tg3_disable_ints(tp);
7484                 return IRQ_RETVAL(1);
7485         }
7486         return IRQ_RETVAL(0);
7487 }
7488
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: service every vector's interrupt handler
 * directly, unless the device is being quiesced.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
7502
/* ndo_tx_timeout hook: optionally log chip state, then schedule the
 * reset task to recover the device.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
7514
7515 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7516 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7517 {
7518         u32 base = (u32) mapping & 0xffffffff;
7519
7520         return (base > 0xffffdcc0) && (base + len + 8 < base);
7521 }
7522
7523 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7524  * of any 4GB boundaries: 4G, 8G, etc
7525  */
7526 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7527                                            u32 len, u32 mss)
7528 {
7529         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7530                 u32 base = (u32) mapping & 0xffffffff;
7531
7532                 return ((base + len + (mss & 0x3fff)) < base);
7533         }
7534         return 0;
7535 }
7536
7537 /* Test for DMA addresses > 40-bit */
7538 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7539                                           int len)
7540 {
7541 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7542         if (tg3_flag(tp, 40BIT_DMA_BUG))
7543                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7544         return 0;
7545 #else
7546         return 0;
7547 #endif
7548 }
7549
7550 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7551                                  dma_addr_t mapping, u32 len, u32 flags,
7552                                  u32 mss, u32 vlan)
7553 {
7554         txbd->addr_hi = ((u64) mapping >> 32);
7555         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7556         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7557         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7558 }
7559
/* Queue the buffer at @map/@len into the TX ring at *@entry, splitting
 * it into multiple descriptors when it exceeds tp->dma_limit.  On
 * return *entry and *budget have been advanced past the descriptors
 * consumed.  Returns true when a hardware DMA erratum was hit or the
 * descriptor budget ran out, in which case the caller must fall back
 * to the DMA workaround path.
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	/* Some chips cannot DMA buffers of 8 bytes or less. */
	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		/* Only the final chunk may carry TXD_FLAG_END. */
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				/* Out of descriptors: clear the
				 * "fragmented" mark on the last chunk
				 * we managed to queue.
				 */
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
7622
/* Unmap the DMA mappings of the skb whose chain starts at ring index
 * @entry: the linear head first, then fragments 0..@last, skipping any
 * descriptors marked "fragmented" (chunks split by tg3_tx_frag_set()).
 * Clears the ring's skb pointer; freeing the skb is the caller's job.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	/* Skip extra descriptors the head was split across. */
	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		/* Skip extra descriptors this fragment was split across. */
		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
7660
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Copy the skb into a freshly allocated linear skb (with extra
 * headroom for 4-byte alignment on non-5701... actually on 5701),
 * map it and queue it as a single descriptor chain starting at *entry.
 * The original skb is always freed and *pskb is pointed at the
 * replacement (NULL on allocation failure).  Returns 0 on success,
 * -1 on allocation/mapping/queueing failure.
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701 needs the data 4-byte aligned. */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				/* Still hit a hwbug; unwind and drop. */
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
7715
7716 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7717
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.  Segments the skb in software
 * and pushes each resulting packet through the normal xmit path.
 * Consumes @skb; returns NETDEV_TX_OK or NETDEV_TX_BUSY.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
7758
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		/* Long TSO headers trip a chip bug on some devices; fall
		 * back to software GSO segmentation there.
		 */
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode the header length into mss/flags; the layout
		 * differs per HW TSO revision.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	/* Map the linear head of the skb. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	/* Unwind mappings up to (not including) the fragment that failed. */
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
7981
/* Enable or disable internal MAC loopback by rewriting tp->mac_mode
 * and flushing it to the MAC_MODE register.  Caller must hold tp->lock
 * (this is called under spin_lock_bh in tg3_set_loopback).
 */
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
        if (enable) {
                /* Force full duplex and clear the port mode before
                 * picking a new one below.
                 */
                tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
                                  MAC_MODE_PORT_MODE_MASK);

                tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

                /* Pre-5705 chips also need the link polarity bit set
                 * in loopback mode.
                 */
                if (!tg3_flag(tp, 5705_PLUS))
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;

                /* Port mode follows the PHY's speed capability. */
                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        } else {
                tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

                if (tg3_flag(tp, 5705_PLUS) ||
                    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
                    tg3_asic_rev(tp) == ASIC_REV_5700)
                        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
        }

        /* Commit the new mode and give the MAC time to settle. */
        tw32(MAC_MODE, tp->mac_mode);
        udelay(40);
}
8009
/* Put the PHY into loopback at a forced speed, internal (BMCR loopback
 * bit) or external (extlpbk) as requested, and program the MAC port
 * mode to match.  Returns 0 on success or -EIO if external loopback
 * cannot be configured.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
        u32 val, bmcr, mac_mode, ptest = 0;

        /* APD and automatic MDI crossover interfere with loopback;
         * turn both off first.
         */
        tg3_phy_toggle_apd(tp, false);
        tg3_phy_toggle_automdix(tp, false);

        if (extlpbk && tg3_phy_set_extloopbk(tp))
                return -EIO;

        /* Build a forced full-duplex BMCR value for the target speed. */
        bmcr = BMCR_FULLDPLX;
        switch (speed) {
        case SPEED_10:
                break;
        case SPEED_100:
                bmcr |= BMCR_SPEED100;
                break;
        case SPEED_1000:
        default:
                /* FET PHYs are 10/100 only; clamp gigabit requests
                 * down to 100 Mbps.
                 */
                if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                        speed = SPEED_100;
                        bmcr |= BMCR_SPEED100;
                } else {
                        speed = SPEED_1000;
                        bmcr |= BMCR_SPEED1000;
                }
        }

        if (extlpbk) {
                if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
                        /* Force gigabit master mode for external
                         * loopback on non-FET PHYs.
                         */
                        tg3_readphy(tp, MII_CTRL1000, &val);
                        val |= CTL1000_AS_MASTER |
                               CTL1000_ENABLE_MASTER;
                        tg3_writephy(tp, MII_CTRL1000, val);
                } else {
                        ptest = MII_TG3_FET_PTEST_TRIM_SEL |
                                MII_TG3_FET_PTEST_TRIM_2;
                        tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
                }
        } else
                bmcr |= BMCR_LOOPBACK;

        tg3_writephy(tp, MII_BMCR, bmcr);

        /* The write needs to be flushed for the FETs */
        if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                tg3_readphy(tp, MII_BMCR, &bmcr);

        udelay(40);

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            tg3_asic_rev(tp) == ASIC_REV_5785) {
                /* Force tx link/lock on 5785 FET PHYs (ptest still
                 * holds the trim bits set above when extlpbk).
                 */
                tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
                             MII_TG3_FET_PTEST_FRC_TX_LINK |
                             MII_TG3_FET_PTEST_FRC_TX_LOCK);

                /* The write needs to be flushed for the AC131 */
                tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
        }

        /* Reset to prevent losing 1st rx packet intermittently */
        if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
            tg3_flag(tp, 5780_CLASS)) {
                tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                udelay(10);
                tw32_f(MAC_RX_MODE, tp->rx_mode);
        }

        /* Program the MAC port mode to match the forced PHY speed. */
        mac_mode = tp->mac_mode &
                   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        if (speed == SPEED_1000)
                mac_mode |= MAC_MODE_PORT_MODE_GMII;
        else
                mac_mode |= MAC_MODE_PORT_MODE_MII;

        if (tg3_asic_rev(tp) == ASIC_REV_5700) {
                u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

                /* Per-PHY link polarity quirks on 5700 — NOTE(review):
                 * inferred from the masked-ID checks only; see PHY
                 * errata for the underlying reason.
                 */
                if (masked_phy_id == TG3_PHY_ID_BCM5401)
                        mac_mode &= ~MAC_MODE_LINK_POLARITY;
                else if (masked_phy_id == TG3_PHY_ID_BCM5411)
                        mac_mode |= MAC_MODE_LINK_POLARITY;

                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_LNK3_LED_MODE);
        }

        tw32(MAC_MODE, mac_mode);
        udelay(40);

        return 0;
}
8102
8103 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8104 {
8105         struct tg3 *tp = netdev_priv(dev);
8106
8107         if (features & NETIF_F_LOOPBACK) {
8108                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8109                         return;
8110
8111                 spin_lock_bh(&tp->lock);
8112                 tg3_mac_loopback(tp, true);
8113                 netif_carrier_on(tp->dev);
8114                 spin_unlock_bh(&tp->lock);
8115                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8116         } else {
8117                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8118                         return;
8119
8120                 spin_lock_bh(&tp->lock);
8121                 tg3_mac_loopback(tp, false);
8122                 /* Force link status check */
8123                 tg3_setup_phy(tp, true);
8124                 spin_unlock_bh(&tp->lock);
8125                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8126         }
8127 }
8128
8129 static netdev_features_t tg3_fix_features(struct net_device *dev,
8130         netdev_features_t features)
8131 {
8132         struct tg3 *tp = netdev_priv(dev);
8133
8134         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8135                 features &= ~NETIF_F_ALL_TSO;
8136
8137         return features;
8138 }
8139
8140 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8141 {
8142         netdev_features_t changed = dev->features ^ features;
8143
8144         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8145                 tg3_set_loopback(dev, features);
8146
8147         return 0;
8148 }
8149
/* Release rx data buffers held by a producer ring set.
 *
 * Per-vector shadow rings (anything other than napi[0]'s ring) can
 * only hold buffers in the [cons, prod) window, so only that window is
 * walked.  The true hw prodring on napi[0] may have a buffer at any
 * slot and is swept in full.
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
{
        int i;

        if (tpr != &tp->napi[0].prodring) {
                /* Walk the occupied window, wrapping at the ring mask. */
                for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
                     i = (i + 1) & tp->rx_std_ring_mask)
                        tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
                                        tp->rx_pkt_map_sz);

                if (tg3_flag(tp, JUMBO_CAPABLE)) {
                        for (i = tpr->rx_jmb_cons_idx;
                             i != tpr->rx_jmb_prod_idx;
                             i = (i + 1) & tp->rx_jmb_ring_mask) {
                                tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
                                                TG3_RX_JMB_MAP_SZ);
                        }
                }

                return;
        }

        /* napi[0]: sweep every slot of the std ring. */
        for (i = 0; i <= tp->rx_std_ring_mask; i++)
                tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
                                tp->rx_pkt_map_sz);

        /* 5780-class chips have no separate jumbo ring (they reuse the
         * std ring with larger buffers), hence the exclusion.
         */
        if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
                for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
                        tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
                                        TG3_RX_JMB_MAP_SZ);
        }
}
8183
8184 /* Initialize rx rings for packet processing.
8185  *
8186  * The chip has been shut down and the driver detached from
8187  * the networking, so no interrupts or new tx packets will
8188  * end up in the driver.  tp->{tx,}lock are held and thus
8189  * we may not sleep.
8190  */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
{
        u32 i, rx_pkt_dma_sz;

        tpr->rx_std_cons_idx = 0;
        tpr->rx_std_prod_idx = 0;
        tpr->rx_jmb_cons_idx = 0;
        tpr->rx_jmb_prod_idx = 0;

        /* Per-vector ring sets only shadow the true hw prodring on
         * napi[0]; clearing their buffer bookkeeping is all that is
         * needed.
         */
        if (tpr != &tp->napi[0].prodring) {
                memset(&tpr->rx_std_buffers[0], 0,
                       TG3_RX_STD_BUFF_RING_SIZE(tp));
                if (tpr->rx_jmb_buffers)
                        memset(&tpr->rx_jmb_buffers[0], 0,
                               TG3_RX_JMB_BUFF_RING_SIZE(tp));
                goto done;
        }

        /* Zero out all descriptors. */
        memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

        /* 5780-class chips carry jumbo-sized buffers on the std ring
         * when the MTU exceeds the standard Ethernet payload.
         */
        rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
        if (tg3_flag(tp, 5780_CLASS) &&
            tp->dev->mtu > ETH_DATA_LEN)
                rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
        tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i <= tp->rx_std_ring_mask; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tpr->rx_std[i];
                rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
                unsigned int frag_size;

                if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
                                      &frag_size) < 0) {
                        netdev_warn(tp->dev,
                                    "Using a smaller RX standard ring. Only "
                                    "%d out of %d buffers were allocated "
                                    "successfully\n", i, tp->rx_pending);
                        /* A completely empty ring is fatal; a partial
                         * one just shrinks rx_pending.
                         */
                        if (i == 0)
                                goto initfail;
                        tp->rx_pending = i;
                        break;
                }
        }

        if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
                goto done;

        memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

        if (!tg3_flag(tp, JUMBO_RING_ENABLE))
                goto done;

        /* Same invariant setup as above, for the jumbo ring. */
        for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tpr->rx_jmb[i].std;
                rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                                  RXD_FLAG_JUMBO;
                rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                       (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        for (i = 0; i < tp->rx_jumbo_pending; i++) {
                unsigned int frag_size;

                if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
                                      &frag_size) < 0) {
                        netdev_warn(tp->dev,
                                    "Using a smaller RX jumbo ring. Only %d "
                                    "out of %d buffers were allocated "
                                    "successfully\n", i, tp->rx_jumbo_pending);
                        if (i == 0)
                                goto initfail;
                        tp->rx_jumbo_pending = i;
                        break;
                }
        }

done:
        return 0;

initfail:
        tg3_rx_prodring_free(tp, tpr);
        return -ENOMEM;
}
8292
8293 static void tg3_rx_prodring_fini(struct tg3 *tp,
8294                                  struct tg3_rx_prodring_set *tpr)
8295 {
8296         kfree(tpr->rx_std_buffers);
8297         tpr->rx_std_buffers = NULL;
8298         kfree(tpr->rx_jmb_buffers);
8299         tpr->rx_jmb_buffers = NULL;
8300         if (tpr->rx_std) {
8301                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8302                                   tpr->rx_std, tpr->rx_std_mapping);
8303                 tpr->rx_std = NULL;
8304         }
8305         if (tpr->rx_jmb) {
8306                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8307                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8308                 tpr->rx_jmb = NULL;
8309         }
8310 }
8311
/* Allocate the memory for one producer ring set: host-side shadow
 * buffer arrays plus the coherent DMA descriptor rings.  Returns 0 or
 * -ENOMEM; on failure everything allocated so far is released.
 */
static int tg3_rx_prodring_init(struct tg3 *tp,
                                struct tg3_rx_prodring_set *tpr)
{
        /* Shadow array tracking the data buffer for each std slot. */
        tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
                                      GFP_KERNEL);
        if (!tpr->rx_std_buffers)
                return -ENOMEM;

        tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
                                         TG3_RX_STD_RING_BYTES(tp),
                                         &tpr->rx_std_mapping,
                                         GFP_KERNEL);
        if (!tpr->rx_std)
                goto err_out;

        /* Jumbo resources only exist on chips with a dedicated jumbo
         * ring (5780-class chips reuse the std ring instead).
         */
        if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
                tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
                                              GFP_KERNEL);
                if (!tpr->rx_jmb_buffers)
                        goto err_out;

                tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
                                                 TG3_RX_JMB_RING_BYTES(tp),
                                                 &tpr->rx_jmb_mapping,
                                                 GFP_KERNEL);
                if (!tpr->rx_jmb)
                        goto err_out;
        }

        return 0;

err_out:
        tg3_rx_prodring_fini(tp, tpr);
        return -ENOMEM;
}
8347
8348 /* Free up pending packets in all rx/tx rings.
8349  *
8350  * The chip has been shut down and the driver detached from
8351  * the networking, so no interrupts or new tx packets will
8352  * end up in the driver.  tp->{tx,}lock is not held and we are not
8353  * in an interrupt context and thus may sleep.
8354  */
static void tg3_free_rings(struct tg3 *tp)
{
        int i, j;

        for (j = 0; j < tp->irq_cnt; j++) {
                struct tg3_napi *tnapi = &tp->napi[j];

                tg3_rx_prodring_free(tp, &tnapi->prodring);

                /* Vectors without tx resources have nothing to free. */
                if (!tnapi->tx_buffers)
                        continue;

                for (i = 0; i < TG3_TX_RING_SIZE; i++) {
                        struct sk_buff *skb = tnapi->tx_buffers[i].skb;

                        if (!skb)
                                continue;

                        /* Unmap the head descriptor plus one per frag
                         * (hence nr_frags - 1 as the last index).
                         */
                        tg3_tx_skb_unmap(tnapi, i,
                                         skb_shinfo(skb)->nr_frags - 1);

                        dev_kfree_skb_any(skb);
                }
                /* Reset byte-queue-limit accounting for this queue. */
                netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
        }
}
8381
8382 /* Initialize tx/rx rings for packet processing.
8383  *
8384  * The chip has been shut down and the driver detached from
8385  * the networking, so no interrupts or new tx packets will
8386  * end up in the driver.  tp->{tx,}lock are held and thus
8387  * we may not sleep.
8388  */
static int tg3_init_rings(struct tg3 *tp)
{
        int i;

        /* Free up all the SKBs. */
        tg3_free_rings(tp);

        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                /* Clear interrupt tag state and the shared status
                 * block for this vector.
                 */
                tnapi->last_tag = 0;
                tnapi->last_irq_tag = 0;
                tnapi->hw_status->status = 0;
                tnapi->hw_status->status_tag = 0;
                memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

                tnapi->tx_prod = 0;
                tnapi->tx_cons = 0;
                if (tnapi->tx_ring)
                        memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

                tnapi->rx_rcb_ptr = 0;
                if (tnapi->rx_rcb)
                        memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

                if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
                        /* Unwind rings already populated on earlier
                         * vectors before reporting failure.
                         */
                        tg3_free_rings(tp);
                        return -ENOMEM;
                }
        }

        return 0;
}
8422
8423 static void tg3_mem_tx_release(struct tg3 *tp)
8424 {
8425         int i;
8426
8427         for (i = 0; i < tp->irq_max; i++) {
8428                 struct tg3_napi *tnapi = &tp->napi[i];
8429
8430                 if (tnapi->tx_ring) {
8431                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8432                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8433                         tnapi->tx_ring = NULL;
8434                 }
8435
8436                 kfree(tnapi->tx_buffers);
8437                 tnapi->tx_buffers = NULL;
8438         }
8439 }
8440
8441 static int tg3_mem_tx_acquire(struct tg3 *tp)
8442 {
8443         int i;
8444         struct tg3_napi *tnapi = &tp->napi[0];
8445
8446         /* If multivector TSS is enabled, vector 0 does not handle
8447          * tx interrupts.  Don't allocate any resources for it.
8448          */
8449         if (tg3_flag(tp, ENABLE_TSS))
8450                 tnapi++;
8451
8452         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8453                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8454                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8455                 if (!tnapi->tx_buffers)
8456                         goto err_out;
8457
8458                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8459                                                     TG3_TX_RING_BYTES,
8460                                                     &tnapi->tx_desc_mapping,
8461                                                     GFP_KERNEL);
8462                 if (!tnapi->tx_ring)
8463                         goto err_out;
8464         }
8465
8466         return 0;
8467
8468 err_out:
8469         tg3_mem_tx_release(tp);
8470         return -ENOMEM;
8471 }
8472
8473 static void tg3_mem_rx_release(struct tg3 *tp)
8474 {
8475         int i;
8476
8477         for (i = 0; i < tp->irq_max; i++) {
8478                 struct tg3_napi *tnapi = &tp->napi[i];
8479
8480                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8481
8482                 if (!tnapi->rx_rcb)
8483                         continue;
8484
8485                 dma_free_coherent(&tp->pdev->dev,
8486                                   TG3_RX_RCB_RING_BYTES(tp),
8487                                   tnapi->rx_rcb,
8488                                   tnapi->rx_rcb_mapping);
8489                 tnapi->rx_rcb = NULL;
8490         }
8491 }
8492
/* Allocate rx producer ring sets and rx return (completion) rings.
 * Returns 0 or -ENOMEM; on failure everything acquired so far is
 * released via tg3_mem_rx_release().
 */
static int tg3_mem_rx_acquire(struct tg3 *tp)
{
        unsigned int i, limit;

        limit = tp->rxq_cnt;

        /* If RSS is enabled, we need a (dummy) producer ring
         * set on vector zero.  This is the true hw prodring.
         */
        if (tg3_flag(tp, ENABLE_RSS))
                limit++;

        for (i = 0; i < limit; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                if (tg3_rx_prodring_init(tp, &tnapi->prodring))
                        goto err_out;

                /* If multivector RSS is enabled, vector 0
                 * does not handle rx or tx interrupts.
                 * Don't allocate any resources for it.
                 */
                if (!i && tg3_flag(tp, ENABLE_RSS))
                        continue;

                /* Rx return ring where the hw posts completions. */
                tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
                                                   TG3_RX_RCB_RING_BYTES(tp),
                                                   &tnapi->rx_rcb_mapping,
                                                   GFP_KERNEL | __GFP_ZERO);
                if (!tnapi->rx_rcb)
                        goto err_out;
        }

        return 0;

err_out:
        tg3_mem_rx_release(tp);
        return -ENOMEM;
}
8532
8533 /*
8534  * Must not be invoked with interrupt sources disabled and
8535  * the hardware shutdown down.
8536  */
8537 static void tg3_free_consistent(struct tg3 *tp)
8538 {
8539         int i;
8540
8541         for (i = 0; i < tp->irq_cnt; i++) {
8542                 struct tg3_napi *tnapi = &tp->napi[i];
8543
8544                 if (tnapi->hw_status) {
8545                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8546                                           tnapi->hw_status,
8547                                           tnapi->status_mapping);
8548                         tnapi->hw_status = NULL;
8549                 }
8550         }
8551
8552         tg3_mem_rx_release(tp);
8553         tg3_mem_tx_release(tp);
8554
8555         if (tp->hw_stats) {
8556                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8557                                   tp->hw_stats, tp->stats_mapping);
8558                 tp->hw_stats = NULL;
8559         }
8560 }
8561
8562 /*
8563  * Must not be invoked with interrupt sources disabled and
8564  * the hardware shutdown down.  Can sleep.
8565  */
static int tg3_alloc_consistent(struct tg3 *tp)
{
        int i;

        /* Statistics block shared with the hardware. */
        tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
                                          sizeof(struct tg3_hw_stats),
                                          &tp->stats_mapping,
                                          GFP_KERNEL | __GFP_ZERO);
        if (!tp->hw_stats)
                goto err_out;

        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                struct tg3_hw_status *sblk;

                /* One status block per interrupt vector. */
                tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
                                                      TG3_HW_STATUS_SIZE,
                                                      &tnapi->status_mapping,
                                                      GFP_KERNEL | __GFP_ZERO);
                if (!tnapi->hw_status)
                        goto err_out;

                sblk = tnapi->hw_status;

                if (tg3_flag(tp, ENABLE_RSS)) {
                        u16 *prodptr = NULL;

                        /*
                         * When RSS is enabled, the status block format changes
                         * slightly.  The "rx_jumbo_consumer", "reserved",
                         * and "rx_mini_consumer" members get mapped to the
                         * other three rx return ring producer indexes.
                         */
                        switch (i) {
                        case 1:
                                prodptr = &sblk->idx[0].rx_producer;
                                break;
                        case 2:
                                prodptr = &sblk->rx_jumbo_consumer;
                                break;
                        case 3:
                                prodptr = &sblk->reserved;
                                break;
                        case 4:
                                prodptr = &sblk->rx_mini_consumer;
                                break;
                        }
                        /* Note: vector 0 deliberately keeps a NULL
                         * prodptr under RSS — it handles no rx work
                         * (see tg3_mem_rx_acquire).
                         */
                        tnapi->rx_rcb_prod_idx = prodptr;
                } else {
                        tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
                }
        }

        if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
                goto err_out;

        return 0;

err_out:
        tg3_free_consistent(tp);
        return -ENOMEM;
}
8628
8629 #define MAX_WAIT_CNT 1000
8630
8631 /* To stop a block, clear the enable bit and poll till it
8632  * clears.  tp->lock is held.
8633  */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
{
        unsigned int i;
        u32 val;

        if (tg3_flag(tp, 5705_PLUS)) {
                switch (ofs) {
                case RCVLSC_MODE:
                case DMAC_MODE:
                case MBFREE_MODE:
                case BUFMGR_MODE:
                case MEMARB_MODE:
                        /* We can't enable/disable these bits of the
                         * 5705/5750, just say success.
                         */
                        return 0;

                default:
                        break;
                }
        }

        /* Clear the enable bit with a flushed write... */
        val = tr32(ofs);
        val &= ~enable_bit;
        tw32_f(ofs, val);

        /* ...then poll up to MAX_WAIT_CNT * 100us for the hardware to
         * acknowledge by clearing the bit.
         */
        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                val = tr32(ofs);
                if ((val & enable_bit) == 0)
                        break;
        }

        if (i == MAX_WAIT_CNT && !silent) {
                dev_err(&tp->pdev->dev,
                        "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
                        ofs, enable_bit);
                return -ENODEV;
        }

        return 0;
}
8676
8677 /* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
        int i, err;

        tg3_disable_ints(tp);

        /* Stop the receive MAC first so no new packets flow in. */
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        /* Shut down the rx-side blocks... */
        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        /* ...then the tx-side blocks. */
        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        /* Disable the transmit MAC and poll for the bit to clear. */
        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                dev_err(&tp->pdev->dev,
                        "%s timed out, TX_MODE_ENABLE will not clear "
                        "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Pulse-reset all send/receive FTQs. */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        /* Wipe the per-vector status blocks so stale state cannot be
         * seen once the hardware is brought back up.
         */
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                if (tnapi->hw_status)
                        memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
        }

        return err;
}
8740
8741 /* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
        /* Only PCI_COMMAND needs saving here; the GRC core-clock reset
         * can clear its memory-enable bit (see tg3_chip_reset).
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
8746
8747 /* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
        u32 val;

        /* Re-enable indirect register accesses. */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
            tg3_flag(tp, PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        /* Allow reads and writes to the APE register and memory space. */
        if (tg3_flag(tp, ENABLE_APE))
                val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                       PCISTATE_ALLOW_APE_SHMEM_WR |
                       PCISTATE_ALLOW_APE_PSPACE_WR;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

        /* Restore the command word saved in tg3_save_pci_state(). */
        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

        if (!tg3_flag(tp, PCI_EXPRESS)) {
                pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                                      tp->pci_cacheline_sz);
                pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                                      tp->pci_lat_timer);
        }

        /* Make sure PCI-X relaxed ordering bit is clear. */
        if (tg3_flag(tp, PCIX_MODE)) {
                u16 pcix_cmd;

                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                     &pcix_cmd);
                pcix_cmd &= ~PCI_X_CMD_ERO;
                pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                      pcix_cmd);
        }

        if (tg3_flag(tp, 5780_CLASS)) {

                /* Chip reset on 5780 will reset MSI enable bit,
                 * so need to restore it.
                 */
                if (tg3_flag(tp, USING_MSI)) {
                        u16 ctrl;

                        pci_read_config_word(tp->pdev,
                                             tp->msi_cap + PCI_MSI_FLAGS,
                                             &ctrl);
                        pci_write_config_word(tp->pdev,
                                              tp->msi_cap + PCI_MSI_FLAGS,
                                              ctrl | PCI_MSI_FLAGS_ENABLE);
                        /* Re-enable MSI in the chip as well. */
                        val = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
                }
        }
}
8807
/* tp->lock is held.
 *
 * Perform a full GRC core-clock reset of the chip and bring it back to
 * a usable post-reset state: PCI config space is saved/restored around
 * the reset, the memory arbiter is re-enabled, bootcode completion is
 * awaited, the MAC port mode is re-selected, and the ASF/firmware
 * enable state is re-probed from NVRAM shadow memory.
 *
 * Returns 0 on success, or the error from tg3_poll_fw() if the
 * bootcode firmware never signals completion.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	/* Hold the NVRAM arbitration and the GRC APE lock so firmware
	 * cannot touch the device while registers are in flux.
	 */
	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	/* Make the tag/status clears visible before waiting out any
	 * in-flight interrupt handlers.
	 */
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			/* Bit 29 keeps the PCIe block alive across the
			 * core-clock reset (not on 5750 A0).
			 */
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Tell the on-die VCPU a driver reset is coming and let
		 * its CPU run so bootcode can handle it.
		 */
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	/* This write actually triggers the core-clock reset. */
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			/* NOTE(review): 0xc4 / bit 15 is an undocumented
			 * 5750 A0 PCIe config workaround register.
			 */
			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	/* Put back the PCI state saved in tg3_save_pci_state() above. */
	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	/* Re-enable the memory arbiter; 5780-class parts must preserve
	 * the other MEMARB_MODE bits for the second function.
	 */
	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	/* Wait for bootcode to signal that it has finished. */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		/* NOTE(review): 0xc4 bit 15 — undocumented 5705 A0 fixup. */
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Re-select the MAC port mode to match the PHY type. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		/* NOTE(review): 0x7c00 bit 25 — undocumented PCIe fixup. */
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
		}
	}

	return 0;
}
9063
9064 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9065 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9066
9067 /* tp->lock is held. */
9068 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9069 {
9070         int err;
9071
9072         tg3_stop_fw(tp);
9073
9074         tg3_write_sig_pre_reset(tp, kind);
9075
9076         tg3_abort_hw(tp, silent);
9077         err = tg3_chip_reset(tp);
9078
9079         __tg3_set_mac_addr(tp, false);
9080
9081         tg3_write_sig_legacy(tp, kind);
9082         tg3_write_sig_post_reset(tp, kind);
9083
9084         if (tp->hw_stats) {
9085                 /* Save the stats across chip resets... */
9086                 tg3_get_nstats(tp, &tp->net_stats_prev);
9087                 tg3_get_estats(tp, &tp->estats_prev);
9088
9089                 /* And make sure the next sample is new data */
9090                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9091         }
9092
9093         if (err)
9094                 return err;
9095
9096         return 0;
9097 }
9098
9099 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9100 {
9101         struct tg3 *tp = netdev_priv(dev);
9102         struct sockaddr *addr = p;
9103         int err = 0;
9104         bool skip_mac_1 = false;
9105
9106         if (!is_valid_ether_addr(addr->sa_data))
9107                 return -EADDRNOTAVAIL;
9108
9109         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9110
9111         if (!netif_running(dev))
9112                 return 0;
9113
9114         if (tg3_flag(tp, ENABLE_ASF)) {
9115                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9116
9117                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9118                 addr0_low = tr32(MAC_ADDR_0_LOW);
9119                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9120                 addr1_low = tr32(MAC_ADDR_1_LOW);
9121
9122                 /* Skip MAC addr 1 if ASF is using it. */
9123                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9124                     !(addr1_high == 0 && addr1_low == 0))
9125                         skip_mac_1 = true;
9126         }
9127         spin_lock_bh(&tp->lock);
9128         __tg3_set_mac_addr(tp, skip_mac_1);
9129         spin_unlock_bh(&tp->lock);
9130
9131         return err;
9132 }
9133
9134 /* tp->lock is held. */
9135 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9136                            dma_addr_t mapping, u32 maxlen_flags,
9137                            u32 nic_addr)
9138 {
9139         tg3_write_mem(tp,
9140                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9141                       ((u64) mapping >> 32));
9142         tg3_write_mem(tp,
9143                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9144                       ((u64) mapping & 0xffffffff));
9145         tg3_write_mem(tp,
9146                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9147                        maxlen_flags);
9148
9149         if (!tg3_flag(tp, 5705_PLUS))
9150                 tg3_write_mem(tp,
9151                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9152                               nic_addr);
9153 }
9154
9155
9156 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9157 {
9158         int i = 0;
9159
9160         if (!tg3_flag(tp, ENABLE_TSS)) {
9161                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9162                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9163                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9164         } else {
9165                 tw32(HOSTCC_TXCOL_TICKS, 0);
9166                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9167                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9168
9169                 for (; i < tp->txq_cnt; i++) {
9170                         u32 reg;
9171
9172                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9173                         tw32(reg, ec->tx_coalesce_usecs);
9174                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9175                         tw32(reg, ec->tx_max_coalesced_frames);
9176                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9177                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9178                 }
9179         }
9180
9181         for (; i < tp->irq_max - 1; i++) {
9182                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9183                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9184                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9185         }
9186 }
9187
9188 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9189 {
9190         int i = 0;
9191         u32 limit = tp->rxq_cnt;
9192
9193         if (!tg3_flag(tp, ENABLE_RSS)) {
9194                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9195                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9196                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9197                 limit--;
9198         } else {
9199                 tw32(HOSTCC_RXCOL_TICKS, 0);
9200                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9201                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9202         }
9203
9204         for (; i < limit; i++) {
9205                 u32 reg;
9206
9207                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9208                 tw32(reg, ec->rx_coalesce_usecs);
9209                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9210                 tw32(reg, ec->rx_max_coalesced_frames);
9211                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9212                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9213         }
9214
9215         for (; i < tp->irq_max - 1; i++) {
9216                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9217                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9218                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9219         }
9220 }
9221
9222 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9223 {
9224         tg3_coal_tx_init(tp, ec);
9225         tg3_coal_rx_init(tp, ec);
9226
9227         if (!tg3_flag(tp, 5705_PLUS)) {
9228                 u32 val = ec->stats_block_coalesce_usecs;
9229
9230                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9231                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9232
9233                 if (!tp->link_up)
9234                         val = 0;
9235
9236                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9237         }
9238 }
9239
9240 /* tp->lock is held. */
9241 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9242 {
9243         u32 txrcb, limit;
9244
9245         /* Disable all transmit rings but the first. */
9246         if (!tg3_flag(tp, 5705_PLUS))
9247                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9248         else if (tg3_flag(tp, 5717_PLUS))
9249                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9250         else if (tg3_flag(tp, 57765_CLASS) ||
9251                  tg3_asic_rev(tp) == ASIC_REV_5762)
9252                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9253         else
9254                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9255
9256         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9257              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9258                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9259                               BDINFO_FLAGS_DISABLED);
9260 }
9261
9262 /* tp->lock is held. */
9263 static void tg3_tx_rcbs_init(struct tg3 *tp)
9264 {
9265         int i = 0;
9266         u32 txrcb = NIC_SRAM_SEND_RCB;
9267
9268         if (tg3_flag(tp, ENABLE_TSS))
9269                 i++;
9270
9271         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9272                 struct tg3_napi *tnapi = &tp->napi[i];
9273
9274                 if (!tnapi->tx_ring)
9275                         continue;
9276
9277                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9278                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9279                                NIC_SRAM_TX_BUFFER_DESC);
9280         }
9281 }
9282
9283 /* tp->lock is held. */
9284 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9285 {
9286         u32 rxrcb, limit;
9287
9288         /* Disable all receive return rings but the first. */
9289         if (tg3_flag(tp, 5717_PLUS))
9290                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9291         else if (!tg3_flag(tp, 5705_PLUS))
9292                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9293         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9294                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9295                  tg3_flag(tp, 57765_CLASS))
9296                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9297         else
9298                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9299
9300         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9301              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9302                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9303                               BDINFO_FLAGS_DISABLED);
9304 }
9305
9306 /* tp->lock is held. */
9307 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9308 {
9309         int i = 0;
9310         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9311
9312         if (tg3_flag(tp, ENABLE_RSS))
9313                 i++;
9314
9315         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9316                 struct tg3_napi *tnapi = &tp->napi[i];
9317
9318                 if (!tnapi->rx_rcb)
9319                         continue;
9320
9321                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9322                                (tp->rx_ret_ring_mask + 1) <<
9323                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9324         }
9325 }
9326
/* tp->lock is held.
 *
 * Return all rings and interrupt mailboxes to their post-reset state:
 * disable the extra TX and RX-return RCBs, mask interrupts, zero the
 * producer/consumer mailboxes and the in-RAM status blocks, program
 * the status-block DMA addresses for every vector, and finally
 * re-initialize the RCBs that are actually in use.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			/* Writing 1 keeps the vector's interrupt masked. */
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, vector 0 owns the TX producer mailbox. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Vectors 1..n use the per-ring status block address registers,
	 * 8 bytes apart starting at HOSTCC_STATBLCK_RING1.
	 */
	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}
9397
9398 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9399 {
9400         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9401
9402         if (!tg3_flag(tp, 5750_PLUS) ||
9403             tg3_flag(tp, 5780_CLASS) ||
9404             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9405             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9406             tg3_flag(tp, 57765_PLUS))
9407                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9408         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9409                  tg3_asic_rev(tp) == ASIC_REV_5787)
9410                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9411         else
9412                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9413
9414         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9415         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9416
9417         val = min(nic_rep_thresh, host_rep_thresh);
9418         tw32(RCVBDI_STD_THRESH, val);
9419
9420         if (tg3_flag(tp, 57765_PLUS))
9421                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9422
9423         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9424                 return;
9425
9426         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9427
9428         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9429
9430         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9431         tw32(RCVBDI_JUMBO_THRESH, val);
9432
9433         if (tg3_flag(tp, 57765_PLUS))
9434                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9435 }
9436
/* Compute the standard bit-reflected CRC-32 (IEEE 802.3, polynomial
 * 0xedb88320) of @len bytes at @buf, as used for the MAC multicast
 * hash filter.  Initial value ~0, final value complemented.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = ~(u32) 0;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];

		/* Process one bit per iteration, LSB first. */
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 0x01) ? 0xedb88320 : 0);
	}

	return ~crc;
}
9460
9461 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9462 {
9463         /* accept or reject all multicast frames */
9464         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9465         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9466         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9467         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9468 }
9469
/* Program the MAC RX filtering to match the netdev's current flags:
 * promiscuous mode, accept-all-multicast, reject-all-multicast, or a
 * CRC-derived 128-bit multicast hash filter.  MAC_RX_MODE is only
 * rewritten when the computed mode differs from the cached one.
 *
 * Caller must hold tp->lock (this touches MAC registers).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			/* Hash = complement of the low 7 CRC bits:
			 * bits 6:5 select one of the four registers,
			 * bits 4:0 select the bit within it.
			 */
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch MAC_RX_MODE if something actually changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
9523
9524 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9525 {
9526         int i;
9527
9528         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9529                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9530 }
9531
9532 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9533 {
9534         int i;
9535
9536         if (!tg3_flag(tp, SUPPORT_MSIX))
9537                 return;
9538
9539         if (tp->rxq_cnt == 1) {
9540                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9541                 return;
9542         }
9543
9544         /* Validate table against current IRQ count */
9545         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9546                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9547                         break;
9548         }
9549
9550         if (i != TG3_RSS_INDIR_TBL_SIZE)
9551                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9552 }
9553
9554 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9555 {
9556         int i = 0;
9557         u32 reg = MAC_RSS_INDIR_TBL_0;
9558
9559         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9560                 u32 val = tp->rss_ind_tbl[i];
9561                 i++;
9562                 for (; i % 8; i++) {
9563                         val <<= 4;
9564                         val |= tp->rss_ind_tbl[i];
9565                 }
9566                 tw32(reg, val);
9567                 reg += 4;
9568         }
9569 }
9570
9571 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9572 {
9573         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9574                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9575         else
9576                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9577 }
9578
9579 /* tp->lock is held. */
9580 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9581 {
9582         u32 val, rdmac_mode;
9583         int i, err, limit;
9584         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9585
9586         tg3_disable_ints(tp);
9587
9588         tg3_stop_fw(tp);
9589
9590         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9591
9592         if (tg3_flag(tp, INIT_COMPLETE))
9593                 tg3_abort_hw(tp, 1);
9594
9595         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9596             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9597                 tg3_phy_pull_config(tp);
9598                 tg3_eee_pull_config(tp, NULL);
9599                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9600         }
9601
9602         /* Enable MAC control of LPI */
9603         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9604                 tg3_setup_eee(tp);
9605
9606         if (reset_phy)
9607                 tg3_phy_reset(tp);
9608
9609         err = tg3_chip_reset(tp);
9610         if (err)
9611                 return err;
9612
9613         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9614
9615         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9616                 val = tr32(TG3_CPMU_CTRL);
9617                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9618                 tw32(TG3_CPMU_CTRL, val);
9619
9620                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9621                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9622                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9623                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9624
9625                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9626                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9627                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9628                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9629
9630                 val = tr32(TG3_CPMU_HST_ACC);
9631                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9632                 val |= CPMU_HST_ACC_MACCLK_6_25;
9633                 tw32(TG3_CPMU_HST_ACC, val);
9634         }
9635
9636         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9637                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9638                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9639                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9640                 tw32(PCIE_PWR_MGMT_THRESH, val);
9641
9642                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9643                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9644
9645                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9646
9647                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9648                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9649         }
9650
9651         if (tg3_flag(tp, L1PLLPD_EN)) {
9652                 u32 grc_mode = tr32(GRC_MODE);
9653
9654                 /* Access the lower 1K of PL PCIE block registers. */
9655                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9656                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9657
9658                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9659                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9660                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9661
9662                 tw32(GRC_MODE, grc_mode);
9663         }
9664
9665         if (tg3_flag(tp, 57765_CLASS)) {
9666                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9667                         u32 grc_mode = tr32(GRC_MODE);
9668
9669                         /* Access the lower 1K of PL PCIE block registers. */
9670                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9671                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9672
9673                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9674                                    TG3_PCIE_PL_LO_PHYCTL5);
9675                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9676                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9677
9678                         tw32(GRC_MODE, grc_mode);
9679                 }
9680
9681                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9682                         u32 grc_mode;
9683
9684                         /* Fix transmit hangs */
9685                         val = tr32(TG3_CPMU_PADRNG_CTL);
9686                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9687                         tw32(TG3_CPMU_PADRNG_CTL, val);
9688
9689                         grc_mode = tr32(GRC_MODE);
9690
9691                         /* Access the lower 1K of DL PCIE block registers. */
9692                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9693                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9694
9695                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9696                                    TG3_PCIE_DL_LO_FTSMAX);
9697                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9698                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9699                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9700
9701                         tw32(GRC_MODE, grc_mode);
9702                 }
9703
9704                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9705                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9706                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9707                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9708         }
9709
9710         /* This works around an issue with Athlon chipsets on
9711          * B3 tigon3 silicon.  This bit has no effect on any
9712          * other revision.  But do not set this on PCI Express
9713          * chips and don't even touch the clocks if the CPMU is present.
9714          */
9715         if (!tg3_flag(tp, CPMU_PRESENT)) {
9716                 if (!tg3_flag(tp, PCI_EXPRESS))
9717                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9718                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9719         }
9720
9721         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9722             tg3_flag(tp, PCIX_MODE)) {
9723                 val = tr32(TG3PCI_PCISTATE);
9724                 val |= PCISTATE_RETRY_SAME_DMA;
9725                 tw32(TG3PCI_PCISTATE, val);
9726         }
9727
9728         if (tg3_flag(tp, ENABLE_APE)) {
9729                 /* Allow reads and writes to the
9730                  * APE register and memory space.
9731                  */
9732                 val = tr32(TG3PCI_PCISTATE);
9733                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9734                        PCISTATE_ALLOW_APE_SHMEM_WR |
9735                        PCISTATE_ALLOW_APE_PSPACE_WR;
9736                 tw32(TG3PCI_PCISTATE, val);
9737         }
9738
9739         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9740                 /* Enable some hw fixes.  */
9741                 val = tr32(TG3PCI_MSI_DATA);
9742                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9743                 tw32(TG3PCI_MSI_DATA, val);
9744         }
9745
9746         /* Descriptor ring init may make accesses to the
9747          * NIC SRAM area to setup the TX descriptors, so we
9748          * can only do this after the hardware has been
9749          * successfully reset.
9750          */
9751         err = tg3_init_rings(tp);
9752         if (err)
9753                 return err;
9754
9755         if (tg3_flag(tp, 57765_PLUS)) {
9756                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9757                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9758                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9759                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9760                 if (!tg3_flag(tp, 57765_CLASS) &&
9761                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
9762                     tg3_asic_rev(tp) != ASIC_REV_5762)
9763                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9764                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9765         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9766                    tg3_asic_rev(tp) != ASIC_REV_5761) {
9767                 /* This value is determined during the probe time DMA
9768                  * engine test, tg3_test_dma.
9769                  */
9770                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9771         }
9772
9773         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9774                           GRC_MODE_4X_NIC_SEND_RINGS |
9775                           GRC_MODE_NO_TX_PHDR_CSUM |
9776                           GRC_MODE_NO_RX_PHDR_CSUM);
9777         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9778
        /* Pseudo-header checksum is done by hardware logic and not
         * the offload processors, so make the chip do the pseudo-
         * header checksums on receive.  For transmit it is more
         * convenient to do the pseudo-header checksum in software
         * as Linux does that on transmit for us in all cases.
         */
9785         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9786
9787         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9788         if (tp->rxptpctl)
9789                 tw32(TG3_RX_PTP_CTL,
9790                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9791
9792         if (tg3_flag(tp, PTP_CAPABLE))
9793                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9794
9795         tw32(GRC_MODE, tp->grc_mode | val);
9796
        /* Setup the timer prescaler register.  Clock is always 66 MHz. */
9798         val = tr32(GRC_MISC_CFG);
9799         val &= ~0xff;
9800         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9801         tw32(GRC_MISC_CFG, val);
9802
9803         /* Initialize MBUF/DESC pool. */
9804         if (tg3_flag(tp, 5750_PLUS)) {
9805                 /* Do nothing.  */
9806         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9807                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9808                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9809                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9810                 else
9811                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9812                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9813                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9814         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9815                 int fw_len;
9816
9817                 fw_len = tp->fw_len;
9818                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9819                 tw32(BUFMGR_MB_POOL_ADDR,
9820                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9821                 tw32(BUFMGR_MB_POOL_SIZE,
9822                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9823         }
9824
9825         if (tp->dev->mtu <= ETH_DATA_LEN) {
9826                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9827                      tp->bufmgr_config.mbuf_read_dma_low_water);
9828                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9829                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9830                 tw32(BUFMGR_MB_HIGH_WATER,
9831                      tp->bufmgr_config.mbuf_high_water);
9832         } else {
9833                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9834                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9835                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9836                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9837                 tw32(BUFMGR_MB_HIGH_WATER,
9838                      tp->bufmgr_config.mbuf_high_water_jumbo);
9839         }
9840         tw32(BUFMGR_DMA_LOW_WATER,
9841              tp->bufmgr_config.dma_low_water);
9842         tw32(BUFMGR_DMA_HIGH_WATER,
9843              tp->bufmgr_config.dma_high_water);
9844
9845         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9846         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9847                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9848         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9849             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9850             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9851                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9852         tw32(BUFMGR_MODE, val);
9853         for (i = 0; i < 2000; i++) {
9854                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9855                         break;
9856                 udelay(10);
9857         }
9858         if (i >= 2000) {
9859                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9860                 return -ENODEV;
9861         }
9862
9863         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9864                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9865
9866         tg3_setup_rxbd_thresholds(tp);
9867
9868         /* Initialize TG3_BDINFO's at:
9869          *  RCVDBDI_STD_BD:     standard eth size rx ring
9870          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9871          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9872          *
9873          * like so:
9874          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9875          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9876          *                              ring attribute flags
9877          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9878          *
9879          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9880          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9881          *
9882          * The size of each ring is fixed in the firmware, but the location is
9883          * configurable.
9884          */
9885         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9886              ((u64) tpr->rx_std_mapping >> 32));
9887         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9888              ((u64) tpr->rx_std_mapping & 0xffffffff));
9889         if (!tg3_flag(tp, 5717_PLUS))
9890                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9891                      NIC_SRAM_RX_BUFFER_DESC);
9892
9893         /* Disable the mini ring */
9894         if (!tg3_flag(tp, 5705_PLUS))
9895                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9896                      BDINFO_FLAGS_DISABLED);
9897
9898         /* Program the jumbo buffer descriptor ring control
9899          * blocks on those devices that have them.
9900          */
9901         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9902             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9903
9904                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9905                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9906                              ((u64) tpr->rx_jmb_mapping >> 32));
9907                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9908                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9909                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9910                               BDINFO_FLAGS_MAXLEN_SHIFT;
9911                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9912                              val | BDINFO_FLAGS_USE_EXT_RECV);
9913                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9914                             tg3_flag(tp, 57765_CLASS) ||
9915                             tg3_asic_rev(tp) == ASIC_REV_5762)
9916                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9917                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9918                 } else {
9919                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9920                              BDINFO_FLAGS_DISABLED);
9921                 }
9922
9923                 if (tg3_flag(tp, 57765_PLUS)) {
9924                         val = TG3_RX_STD_RING_SIZE(tp);
9925                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9926                         val |= (TG3_RX_STD_DMA_SZ << 2);
9927                 } else
9928                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9929         } else
9930                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9931
9932         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9933
9934         tpr->rx_std_prod_idx = tp->rx_pending;
9935         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9936
9937         tpr->rx_jmb_prod_idx =
9938                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9939         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9940
9941         tg3_rings_reset(tp);
9942
9943         /* Initialize MAC address and backoff seed. */
9944         __tg3_set_mac_addr(tp, false);
9945
9946         /* MTU + ethernet header + FCS + optional VLAN tag */
9947         tw32(MAC_RX_MTU_SIZE,
9948              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9949
9950         /* The slot time is changed by tg3_setup_phy if we
9951          * run at gigabit with half duplex.
9952          */
9953         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9954               (6 << TX_LENGTHS_IPG_SHIFT) |
9955               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9956
9957         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9958             tg3_asic_rev(tp) == ASIC_REV_5762)
9959                 val |= tr32(MAC_TX_LENGTHS) &
9960                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9961                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9962
9963         tw32(MAC_TX_LENGTHS, val);
9964
9965         /* Receive rules. */
9966         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9967         tw32(RCVLPC_CONFIG, 0x0181);
9968
9969         /* Calculate RDMAC_MODE setting early, we need it to determine
9970          * the RCVLPC_STATE_ENABLE mask.
9971          */
9972         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9973                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9974                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9975                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9976                       RDMAC_MODE_LNGREAD_ENAB);
9977
9978         if (tg3_asic_rev(tp) == ASIC_REV_5717)
9979                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9980
9981         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9982             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9983             tg3_asic_rev(tp) == ASIC_REV_57780)
9984                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9985                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9986                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9987
9988         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9989             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9990                 if (tg3_flag(tp, TSO_CAPABLE) &&
9991                     tg3_asic_rev(tp) == ASIC_REV_5705) {
9992                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9993                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9994                            !tg3_flag(tp, IS_5788)) {
9995                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9996                 }
9997         }
9998
9999         if (tg3_flag(tp, PCI_EXPRESS))
10000                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10001
10002         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10003                 tp->dma_limit = 0;
10004                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10005                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10006                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10007                 }
10008         }
10009
10010         if (tg3_flag(tp, HW_TSO_1) ||
10011             tg3_flag(tp, HW_TSO_2) ||
10012             tg3_flag(tp, HW_TSO_3))
10013                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10014
10015         if (tg3_flag(tp, 57765_PLUS) ||
10016             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10017             tg3_asic_rev(tp) == ASIC_REV_57780)
10018                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10019
10020         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10021             tg3_asic_rev(tp) == ASIC_REV_5762)
10022                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10023
10024         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10025             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10026             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10027             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10028             tg3_flag(tp, 57765_PLUS)) {
10029                 u32 tgtreg;
10030
10031                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10032                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10033                 else
10034                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10035
10036                 val = tr32(tgtreg);
10037                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10038                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10039                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10040                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10041                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10042                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10043                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10044                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10045                 }
10046                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10047         }
10048
10049         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10050             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10051             tg3_asic_rev(tp) == ASIC_REV_5762) {
10052                 u32 tgtreg;
10053
10054                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10055                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10056                 else
10057                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10058
10059                 val = tr32(tgtreg);
10060                 tw32(tgtreg, val |
10061                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10062                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10063         }
10064
10065         /* Receive/send statistics. */
10066         if (tg3_flag(tp, 5750_PLUS)) {
10067                 val = tr32(RCVLPC_STATS_ENABLE);
10068                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10069                 tw32(RCVLPC_STATS_ENABLE, val);
10070         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10071                    tg3_flag(tp, TSO_CAPABLE)) {
10072                 val = tr32(RCVLPC_STATS_ENABLE);
10073                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10074                 tw32(RCVLPC_STATS_ENABLE, val);
10075         } else {
10076                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10077         }
10078         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10079         tw32(SNDDATAI_STATSENAB, 0xffffff);
10080         tw32(SNDDATAI_STATSCTRL,
10081              (SNDDATAI_SCTRL_ENABLE |
10082               SNDDATAI_SCTRL_FASTUPD));
10083
10084         /* Setup host coalescing engine. */
10085         tw32(HOSTCC_MODE, 0);
10086         for (i = 0; i < 2000; i++) {
10087                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10088                         break;
10089                 udelay(10);
10090         }
10091
10092         __tg3_set_coalesce(tp, &tp->coal);
10093
10094         if (!tg3_flag(tp, 5705_PLUS)) {
10095                 /* Status/statistics block address.  See tg3_timer,
10096                  * the tg3_periodic_fetch_stats call there, and
10097                  * tg3_get_stats to see how this works for 5705/5750 chips.
10098                  */
10099                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10100                      ((u64) tp->stats_mapping >> 32));
10101                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10102                      ((u64) tp->stats_mapping & 0xffffffff));
10103                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10104
10105                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10106
10107                 /* Clear statistics and status block memory areas */
10108                 for (i = NIC_SRAM_STATS_BLK;
10109                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10110                      i += sizeof(u32)) {
10111                         tg3_write_mem(tp, i, 0);
10112                         udelay(40);
10113                 }
10114         }
10115
10116         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10117
10118         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10119         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10120         if (!tg3_flag(tp, 5705_PLUS))
10121                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10122
10123         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10124                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10125                 /* reset to prevent losing 1st rx packet intermittently */
10126                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10127                 udelay(10);
10128         }
10129
10130         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10131                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10132                         MAC_MODE_FHDE_ENABLE;
10133         if (tg3_flag(tp, ENABLE_APE))
10134                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10135         if (!tg3_flag(tp, 5705_PLUS) &&
10136             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10137             tg3_asic_rev(tp) != ASIC_REV_5700)
10138                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10139         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10140         udelay(40);
10141
10142         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10143          * If TG3_FLAG_IS_NIC is zero, we should read the
10144          * register to preserve the GPIO settings for LOMs. The GPIOs,
10145          * whether used as inputs or outputs, are set by boot code after
10146          * reset.
10147          */
10148         if (!tg3_flag(tp, IS_NIC)) {
10149                 u32 gpio_mask;
10150
10151                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10152                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10153                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10154
10155                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10156                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10157                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10158
10159                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10160                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10161
10162                 tp->grc_local_ctrl &= ~gpio_mask;
10163                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10164
10165                 /* GPIO1 must be driven high for eeprom write protect */
10166                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10167                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10168                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10169         }
10170         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10171         udelay(100);
10172
10173         if (tg3_flag(tp, USING_MSIX)) {
10174                 val = tr32(MSGINT_MODE);
10175                 val |= MSGINT_MODE_ENABLE;
10176                 if (tp->irq_cnt > 1)
10177                         val |= MSGINT_MODE_MULTIVEC_EN;
10178                 if (!tg3_flag(tp, 1SHOT_MSI))
10179                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10180                 tw32(MSGINT_MODE, val);
10181         }
10182
10183         if (!tg3_flag(tp, 5705_PLUS)) {
10184                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10185                 udelay(40);
10186         }
10187
10188         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10189                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10190                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10191                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10192                WDMAC_MODE_LNGREAD_ENAB);
10193
10194         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10195             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10196                 if (tg3_flag(tp, TSO_CAPABLE) &&
10197                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10198                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10199                         /* nothing */
10200                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10201                            !tg3_flag(tp, IS_5788)) {
10202                         val |= WDMAC_MODE_RX_ACCEL;
10203                 }
10204         }
10205
10206         /* Enable host coalescing bug fix */
10207         if (tg3_flag(tp, 5755_PLUS))
10208                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10209
10210         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10211                 val |= WDMAC_MODE_BURST_ALL_DATA;
10212
10213         tw32_f(WDMAC_MODE, val);
10214         udelay(40);
10215
10216         if (tg3_flag(tp, PCIX_MODE)) {
10217                 u16 pcix_cmd;
10218
10219                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10220                                      &pcix_cmd);
10221                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10222                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10223                         pcix_cmd |= PCI_X_CMD_READ_2K;
10224                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10225                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10226                         pcix_cmd |= PCI_X_CMD_READ_2K;
10227                 }
10228                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10229                                       pcix_cmd);
10230         }
10231
10232         tw32_f(RDMAC_MODE, rdmac_mode);
10233         udelay(40);
10234
10235         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10236             tg3_asic_rev(tp) == ASIC_REV_5720) {
10237                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10238                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10239                                 break;
10240                 }
10241                 if (i < TG3_NUM_RDMA_CHANNELS) {
10242                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10243                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10244                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10245                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10246                 }
10247         }
10248
10249         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10250         if (!tg3_flag(tp, 5705_PLUS))
10251                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10252
10253         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10254                 tw32(SNDDATAC_MODE,
10255                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10256         else
10257                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10258
10259         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10260         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10261         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10262         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10263                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10264         tw32(RCVDBDI_MODE, val);
10265         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10266         if (tg3_flag(tp, HW_TSO_1) ||
10267             tg3_flag(tp, HW_TSO_2) ||
10268             tg3_flag(tp, HW_TSO_3))
10269                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10270         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10271         if (tg3_flag(tp, ENABLE_TSS))
10272                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10273         tw32(SNDBDI_MODE, val);
10274         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10275
10276         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10277                 err = tg3_load_5701_a0_firmware_fix(tp);
10278                 if (err)
10279                         return err;
10280         }
10281
10282         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10283                 /* Ignore any errors for the firmware download. If download
10284                  * fails, the device will operate with EEE disabled
10285                  */
10286                 tg3_load_57766_firmware(tp);
10287         }
10288
10289         if (tg3_flag(tp, TSO_CAPABLE)) {
10290                 err = tg3_load_tso_firmware(tp);
10291                 if (err)
10292                         return err;
10293         }
10294
10295         tp->tx_mode = TX_MODE_ENABLE;
10296
10297         if (tg3_flag(tp, 5755_PLUS) ||
10298             tg3_asic_rev(tp) == ASIC_REV_5906)
10299                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10300
10301         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10302             tg3_asic_rev(tp) == ASIC_REV_5762) {
10303                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10304                 tp->tx_mode &= ~val;
10305                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10306         }
10307
10308         tw32_f(MAC_TX_MODE, tp->tx_mode);
10309         udelay(100);
10310
10311         if (tg3_flag(tp, ENABLE_RSS)) {
10312                 tg3_rss_write_indir_tbl(tp);
10313
10314                 /* Setup the "secret" hash key. */
10315                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10316                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10317                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10318                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10319                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10320                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10321                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10322                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10323                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10324                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10325         }
10326
10327         tp->rx_mode = RX_MODE_ENABLE;
10328         if (tg3_flag(tp, 5755_PLUS))
10329                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10330
10331         if (tg3_flag(tp, ENABLE_RSS))
10332                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10333                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10334                                RX_MODE_RSS_IPV6_HASH_EN |
10335                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10336                                RX_MODE_RSS_IPV4_HASH_EN |
10337                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10338
10339         tw32_f(MAC_RX_MODE, tp->rx_mode);
10340         udelay(10);
10341
10342         tw32(MAC_LED_CTRL, tp->led_ctrl);
10343
10344         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10345         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10346                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10347                 udelay(10);
10348         }
10349         tw32_f(MAC_RX_MODE, tp->rx_mode);
10350         udelay(10);
10351
10352         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10353                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10354                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10355                         /* Set drive transmission level to 1.2V  */
10356                         /* only if the signal pre-emphasis bit is not set  */
10357                         val = tr32(MAC_SERDES_CFG);
10358                         val &= 0xfffff000;
10359                         val |= 0x880;
10360                         tw32(MAC_SERDES_CFG, val);
10361                 }
10362                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10363                         tw32(MAC_SERDES_CFG, 0x616000);
10364         }
10365
10366         /* Prevent chip from dropping frames when flow control
10367          * is enabled.
10368          */
10369         if (tg3_flag(tp, 57765_CLASS))
10370                 val = 1;
10371         else
10372                 val = 2;
10373         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10374
10375         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10376             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10377                 /* Use hardware link auto-negotiation */
10378                 tg3_flag_set(tp, HW_AUTONEG);
10379         }
10380
10381         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10382             tg3_asic_rev(tp) == ASIC_REV_5714) {
10383                 u32 tmp;
10384
10385                 tmp = tr32(SERDES_RX_CTRL);
10386                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10387                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10388                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10389                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10390         }
10391
10392         if (!tg3_flag(tp, USE_PHYLIB)) {
10393                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10394                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10395
10396                 err = tg3_setup_phy(tp, false);
10397                 if (err)
10398                         return err;
10399
10400                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10401                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10402                         u32 tmp;
10403
10404                         /* Clear CRC stats. */
10405                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10406                                 tg3_writephy(tp, MII_TG3_TEST1,
10407                                              tmp | MII_TG3_TEST1_CRC_EN);
10408                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10409                         }
10410                 }
10411         }
10412
10413         __tg3_set_rx_mode(tp->dev);
10414
10415         /* Initialize receive rules. */
10416         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10417         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10418         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10419         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10420
10421         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10422                 limit = 8;
10423         else
10424                 limit = 16;
10425         if (tg3_flag(tp, ENABLE_ASF))
10426                 limit -= 4;
10427         switch (limit) {
10428         case 16:
10429                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10430         case 15:
10431                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10432         case 14:
10433                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10434         case 13:
10435                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10436         case 12:
10437                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10438         case 11:
10439                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10440         case 10:
10441                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10442         case 9:
10443                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10444         case 8:
10445                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10446         case 7:
10447                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10448         case 6:
10449                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10450         case 5:
10451                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10452         case 4:
10453                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10454         case 3:
10455                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10456         case 2:
10457         case 1:
10458
10459         default:
10460                 break;
10461         }
10462
10463         if (tg3_flag(tp, ENABLE_APE))
10464                 /* Write our heartbeat update interval to APE. */
10465                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10466                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10467
10468         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10469
10470         return 0;
10471 }
10472
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Make sure the core clocks are configured before touching the
	 * rest of the hardware.
	 */
	tg3_switch_clocks(tp);

	/* Reset the PCI memory window base before the full hw init. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
10484
10485 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10486 {
10487         int i;
10488
10489         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10490                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10491
10492                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10493                 off += len;
10494
10495                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10496                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10497                         memset(ocir, 0, TG3_OCIR_LEN);
10498         }
10499 }
10500
10501 /* sysfs attributes for hwmon */
10502 static ssize_t tg3_show_temp(struct device *dev,
10503                              struct device_attribute *devattr, char *buf)
10504 {
10505         struct pci_dev *pdev = to_pci_dev(dev);
10506         struct net_device *netdev = pci_get_drvdata(pdev);
10507         struct tg3 *tp = netdev_priv(netdev);
10508         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10509         u32 temperature;
10510
10511         spin_lock_bh(&tp->lock);
10512         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10513                                 sizeof(temperature));
10514         spin_unlock_bh(&tp->lock);
10515         return sprintf(buf, "%u\n", temperature);
10516 }
10517
10518
/* One sysfs temperature attribute per APE scratchpad offset: the
 * current reading plus the caution (critical) and maximum thresholds.
 * All three are served by tg3_show_temp() via the attr->index field.
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

/* Attribute group registered on the PCI device by tg3_hwmon_open()
 * and removed by tg3_hwmon_close().
 */
static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};

static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
10536
10537 static void tg3_hwmon_close(struct tg3 *tp)
10538 {
10539         if (tp->hwmon_dev) {
10540                 hwmon_device_unregister(tp->hwmon_dev);
10541                 tp->hwmon_dev = NULL;
10542                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10543         }
10544 }
10545
10546 static void tg3_hwmon_open(struct tg3 *tp)
10547 {
10548         int i, err;
10549         u32 size = 0;
10550         struct pci_dev *pdev = tp->pdev;
10551         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10552
10553         tg3_sd_scan_scratchpad(tp, ocirs);
10554
10555         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10556                 if (!ocirs[i].src_data_length)
10557                         continue;
10558
10559                 size += ocirs[i].src_hdr_length;
10560                 size += ocirs[i].src_data_length;
10561         }
10562
10563         if (!size)
10564                 return;
10565
10566         /* Register hwmon sysfs hooks */
10567         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10568         if (err) {
10569                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10570                 return;
10571         }
10572
10573         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10574         if (IS_ERR(tp->hwmon_dev)) {
10575                 tp->hwmon_dev = NULL;
10576                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10577                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10578         }
10579 }
10580
10581
/* Accumulate the 32-bit statistics register REG into the 64-bit
 * (high/low pair) software counter PSTAT, propagating the carry into
 * the high word when the low word wraps around.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
10588
/* Fold the chip's 32-bit MAC statistics registers into the 64-bit
 * software counters in tp->hw_stats.  Runs from the driver timer with
 * tp->lock held; skipped entirely while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	/* Transmit-side counters. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* 5719/5720 RDMA workaround: once enough packets have been
	 * transmitted, clear the workaround bit in the LSO RDMA control
	 * register and drop the flag so this runs only once.
	 */
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	/* Receive-side counters. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		/* On these chips the discard count is derived from the
		 * mbuf low-watermark attention bit instead: count one
		 * discard per tick the bit is seen, then clear it.
		 */
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
10653
/* Workaround for chips that can drop an MSI: if a NAPI context has had
 * work pending across two consecutive timer ticks with its rx/tx
 * consumer indices unchanged, assume the interrupt was lost and invoke
 * the MSI handler directly.
 */
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					/* First stalled tick: give the
					 * interrupt one more chance
					 * (note: returns, not continues).
					 */
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		/* Progress was made (or we recovered); rearm the check. */
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
10676
/* Driver watchdog timer.  Fires every tp->timer_offset jiffies and
 * handles: the missed-MSI workaround, the non-tagged-status interrupt
 * race, once-per-second link polling and statistics collection, and
 * the periodic ASF firmware heartbeat.  Always re-arms itself.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Don't touch the hardware while an IRQ sync or a reset task is
	 * in flight; just re-arm.
	 */
	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine stopped unexpectedly --
			 * schedule a full chip reset in process context.
			 */
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			/* Poll MAC_STATUS for link/PHY events instead of
			 * relying on link-change interrupts.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Clear and restore the port mode
					 * bits before renegotiating.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
10808
10809 static void tg3_timer_init(struct tg3 *tp)
10810 {
10811         if (tg3_flag(tp, TAGGED_STATUS) &&
10812             tg3_asic_rev(tp) != ASIC_REV_5717 &&
10813             !tg3_flag(tp, 57765_CLASS))
10814                 tp->timer_offset = HZ;
10815         else
10816                 tp->timer_offset = HZ / 10;
10817
10818         BUG_ON(tp->timer_offset > HZ);
10819
10820         tp->timer_multiplier = (HZ / tp->timer_offset);
10821         tp->asf_multiplier = (HZ / tp->timer_offset) *
10822                              TG3_FW_UPDATE_FREQ_SEC;
10823
10824         init_timer(&tp->timer);
10825         tp->timer.data = (unsigned long) tp;
10826         tp->timer.function = tg3_timer;
10827 }
10828
10829 static void tg3_timer_start(struct tg3 *tp)
10830 {
10831         tp->asf_counter   = tp->asf_multiplier;
10832         tp->timer_counter = tp->timer_multiplier;
10833
10834         tp->timer.expires = jiffies + tp->timer_offset;
10835         add_timer(&tp->timer);
10836 }
10837
/* Synchronously stop the watchdog timer, waiting for a concurrently
 * running tg3_timer() to finish on other CPUs.
 */
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
10842
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* Teardown needs to run without tp->lock (hence the
		 * __releases/__acquires annotations): drop it, close the
		 * device, then re-acquire before returning to the caller.
		 */
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
10866
/* Process-context reset handler, scheduled via
 * tg3_reset_task_schedule().  Quiesces the PHY and netif, halts and
 * re-initializes the chip, then restarts everything.  Clears
 * RESET_TASK_PENDING on completion.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	/* Device was closed before the task ran: nothing to reset. */
	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	/* tg3_phy_stop()/tg3_netif_stop() must run unlocked. */
	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* Recovering from a TX hang: switch to flushed
		 * register-based mailbox writes before restarting
		 * the hardware.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
10910
10911 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10912 {
10913         irq_handler_t fn;
10914         unsigned long flags;
10915         char *name;
10916         struct tg3_napi *tnapi = &tp->napi[irq_num];
10917
10918         if (tp->irq_cnt == 1)
10919                 name = tp->dev->name;
10920         else {
10921                 name = &tnapi->irq_lbl[0];
10922                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10923                 name[IFNAMSIZ-1] = 0;
10924         }
10925
10926         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10927                 fn = tg3_msi;
10928                 if (tg3_flag(tp, 1SHOT_MSI))
10929                         fn = tg3_msi_1shot;
10930                 flags = 0;
10931         } else {
10932                 fn = tg3_interrupt;
10933                 if (tg3_flag(tp, TAGGED_STATUS))
10934                         fn = tg3_interrupt_tagged;
10935                 flags = IRQF_SHARED;
10936         }
10937
10938         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10939 }
10940
/* Verify the chip can actually deliver an interrupt: temporarily swap
 * in a minimal test ISR, force an interrupt through the coalescing
 * engine, poll for evidence of delivery, then restore the normal
 * handler.  Returns 0 on success, -EIO if no interrupt was observed,
 * -ENODEV if the device is down, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force the coalescing engine to raise an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	/* Poll up to ~50ms for evidence that the interrupt fired. */
	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* A non-zero interrupt mailbox or a masked PCI interrupt
		 * is treated as proof of delivery.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/* Restore the normal production interrupt handler. */
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
11014
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the saved PCI command word. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, true);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
11075
11076 static int tg3_request_firmware(struct tg3 *tp)
11077 {
11078         const struct tg3_firmware_hdr *fw_hdr;
11079
11080         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11081                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11082                            tp->fw_needed);
11083                 return -ENOENT;
11084         }
11085
11086         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11087
11088         /* Firmware blob starts with version numbers, followed by
11089          * start address and _full_ length including BSS sections
11090          * (which must be longer than the actual data, of course
11091          */
11092
11093         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11094         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11095                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11096                            tp->fw_len, tp->fw_needed);
11097                 release_firmware(tp->fw);
11098                 tp->fw = NULL;
11099                 return -EINVAL;
11100         }
11101
11102         /* We no longer need firmware; we have it. */
11103         tp->fw_needed = NULL;
11104         return 0;
11105 }
11106
11107 static u32 tg3_irq_count(struct tg3 *tp)
11108 {
11109         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11110
11111         if (irq_cnt > 1) {
11112                 /* We want as many rx rings enabled as there are cpus.
11113                  * In multiqueue MSI-X mode, the first MSI-X vector
11114                  * only deals with link interrupts, etc, so we add
11115                  * one to the number of vectors we are requesting.
11116                  */
11117                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11118         }
11119
11120         return irq_cnt;
11121 }
11122
/* Try to switch the device to MSI-X interrupt mode.
 *
 * Sizes the rx/tx queue counts (user request first, otherwise one rx
 * queue per CPU up to the hardware maximum; tx forced to one queue
 * unless explicitly requested) and asks the PCI core for one vector
 * per interrupt.  If fewer vectors are granted, the queue counts are
 * scaled down to match.  Returns true when MSI-X is enabled, false to
 * let the caller fall back to MSI/INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	/* pci_enable_msix() returns 0 on success, a negative errno on
	 * failure, or a positive count of vectors actually available
	 * when the request was too large; in that case retry with the
	 * smaller count.
	 */
	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		/* Vector 0 serves link/misc events, so rx rings get one
		 * fewer than the granted vector count (but at least one).
		 */
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	/* A single vector means no RSS/TSS; MSI-X is still in effect. */
	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
11183
/* Select the interrupt mode (MSI-X, MSI or legacy INTx) for the device.
 *
 * Tries MSI-X first when supported, then single MSI, and finally falls
 * back to the shared legacy interrupt line.  Chips advertising MSI or
 * MSI-X are expected to also support tagged status blocks; if not, MSI
 * is refused outright and the legacy path is used.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		/* Multi-vector delivery only matters when more than one
		 * MSI-X vector is actually in use.
		 */
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		/* MSI and INTx both use the single PCI irq line. */
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
11222
/* Undo tg3_ints_init(): release any MSI/MSI-X resources held by the
 * PCI core and clear all interrupt-mode and RSS/TSS flags.
 */
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
11234
/* Bring the device from closed to fully running state.
 *
 * @reset_phy: forwarded to tg3_init_hw() to request a PHY reset.
 * @test_irq:  when MSI is in use, verify interrupt delivery via
 *             tg3_test_msi() before declaring the device up.
 * @init:      true on first open (APE firmware is told about driver
 *             init and tg3_ptp_init() runs), false on resume paths
 *             (tg3_ptp_resume() runs instead).
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired here (irqs, NAPI contexts, DMA memory, vectors) is
 * released again via the unwind labels at the bottom.
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto out_ints_fini;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			/* Unwind only the irqs requested so far. */
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto out_napi_fini;
		}
	}

	tg3_full_lock(tp, 0);

	if (init)
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		/* Hardware bring-up failed: halt and drop the rings
		 * while still holding the full lock.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto out_free_irq;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			/* tg3_test_msi() already re-freed / re-requested
			 * irqs on its fallback path, so skip out_free_irq.
			 */
			goto out_napi_fini;
		}

		/* Pre-57765 chips: request one-shot MSI behavior through
		 * the PCIe transaction config register.
		 */
		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	if (init)
		tg3_ptp_init(tp);
	else
		tg3_ptp_resume(tp);


	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

out_free_irq:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

out_napi_fini:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

out_ints_fini:
	tg3_ints_fini(tp);

	return err;
}
11353
/* Tear down a running device: the inverse of tg3_start().
 *
 * Cancels any pending reset task, stops the tx path, periodic timer,
 * hwmon and PHY, then halts the hardware and frees the rings under the
 * full lock.  Finally releases irqs (highest vector first), interrupt
 * resources, NAPI contexts and DMA memory.
 */
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
11388
/* ndo_open handler: load any required firmware, power the chip up and
 * start it.
 *
 * Firmware load failure is fatal only on 5701 A0; other chips degrade
 * gracefully (57766 drops EEE capability, the rest drop TSO), and the
 * capability is restored if a later open succeeds in loading it.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			/* 5701 A0 cannot run without its firmware. */
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp,
			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
			true, true);
	if (err) {
		/* Start failed: drop aux power and park the chip in D3. */
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	/* NOTE(review): the PTP clock is registered even when tg3_start()
	 * failed and the chip was just powered down — confirm whether
	 * registration should be skipped on the error path.
	 */
	if (tg3_flag(tp, PTP_CAPABLE)) {
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	return err;
}
11446
/* ndo_stop handler: unregister the PTP clock, stop the device, reset
 * the cumulative statistics snapshots and power the chip down.
 * Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_ptp_fini(tp);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_power_down(tp);

	tg3_carrier_off(tp);

	return 0;
}
11465
11466 static inline u64 get_stat64(tg3_stat64_t *val)
11467 {
11468        return ((u64)val->high << 32) | ((u64)val->low);
11469 }
11470
/* Return the cumulative rx CRC error count.
 *
 * On 5700/5701 with a copper PHY the count comes from the PHY itself:
 * CRC counting is enabled via MII_TG3_TEST1 and the error counter
 * register is read and accumulated in tp->phy_crc_errors (the +=
 * pattern suggests the register is clear-on-read — confirm against the
 * PHY datasheet).  All other configurations read the MAC statistics
 * block's FCS error counter.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
11494
/* Fold one 64-bit hardware counter into the cumulative ethtool stats:
 * snapshot saved across the last reset plus the live hardware value.
 * Relies on 'estats', 'old_estats' and 'hw_stats' locals at the call
 * site (see tg3_get_estats()).
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)
11498
/* Fill @estats with the full set of ethtool statistics.  Each field is
 * the snapshot saved before the last counter reset (estats_prev) plus
 * the current hardware statistics block value.  The field order below
 * mirrors struct tg3_ethtool_stats.
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	/* Receive-side MAC counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit-side MAC counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Receive list placement state machine counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	/* Send data initiator counters. */
	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Host coalescing counters. */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
11582
/* Fill @stats with cumulative netdev counters: each field combines the
 * snapshot saved before the last reset (net_stats_prev) with the live
 * hardware statistics block, mapping MAC counters onto the generic
 * rtnl_link_stats64 fields.
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on 5700/5701; see
	 * tg3_calc_crc_errors().
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* Software-maintained drop counts (not from the hw stats block). */
	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
11638
/* ethtool get_regs_len: the register dump is always a fixed-size block. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
11643
/* ethtool get_regs: dump the legacy register block into @_p.
 * The buffer is zeroed first; if the chip is in low-power state the
 * registers cannot be read and the dump is left all-zero.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
11662
/* ethtool get_eeprom_len: report the size of the on-board NVRAM. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
11669
/* ethtool get_eeprom: copy @eeprom->len bytes of NVRAM starting at
 * @eeprom->offset into @data.
 *
 * NVRAM is read in 4-byte big-endian words, so an unaligned head and
 * tail are fetched separately and the needed bytes extracted.
 * eeprom->len is updated to the number of bytes actually copied, even
 * on partial failure.  Returns 0 or a negative errno; -EAGAIN while
 * the chip is in low-power state.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* Report the bytes copied before the failure. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
11732
/* ethtool set_eeprom: write @eeprom->len bytes at @eeprom->offset.
 *
 * NVRAM writes must be 4-byte aligned.  If the request has a
 * misaligned head or tail, the bordering words are read first and a
 * temporary buffer is assembled so a single aligned write of the
 * widened range can be issued.  Returns 0 or a negative errno;
 * -EAGAIN while the chip is in low-power state, -EINVAL for a bad
 * magic or missing NVRAM.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {	/* assignment intended */
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Rebuild the widened, aligned range: preserved head
		 * word, caller data, preserved tail word.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
11791
/* ethtool get_settings: report link capabilities and current state.
 *
 * When phylib manages the PHY the query is delegated to it entirely;
 * otherwise the answer is assembled from the driver's own link_config
 * and phy_flags.  Speed/duplex/MDI-X are only reported when the
 * interface is up with link; otherwise UNKNOWN/INVALID sentinels are
 * returned.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper PHYs add 10/100 + TP; SerDes reports fibre only. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Map the rx/tx flow-control configuration onto the
		 * Pause/Asym_Pause advertisement bits.
		 */
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && tp->link_up) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
11857
/* ethtool set_settings: validate and apply a new link configuration.
 *
 * Delegated to phylib when it owns the PHY.  For autoneg requests the
 * advertising mask is checked against (and clamped to) what the
 * device supports; forced-mode requests are limited to 1000/full on
 * SerDes and 10/100 on copper.  The new configuration is applied to
 * the hardware immediately if the interface is running.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the mask of modes this device can advertise... */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		/* ...reject anything outside it... */
		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* ...then keep only the speed/duplex bits of the request. */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(dev))
		tg3_setup_phy(tp, true);

	tg3_full_unlock(tp);

	return 0;
}
11948
11949 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11950 {
11951         struct tg3 *tp = netdev_priv(dev);
11952
11953         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11954         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11955         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11956         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11957 }
11958
11959 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11960 {
11961         struct tg3 *tp = netdev_priv(dev);
11962
11963         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11964                 wol->supported = WAKE_MAGIC;
11965         else
11966                 wol->supported = 0;
11967         wol->wolopts = 0;
11968         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11969                 wol->wolopts = WAKE_MAGIC;
11970         memset(&wol->sopass, 0, sizeof(wol->sopass));
11971 }
11972
11973 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11974 {
11975         struct tg3 *tp = netdev_priv(dev);
11976         struct device *dp = &tp->pdev->dev;
11977
11978         if (wol->wolopts & ~WAKE_MAGIC)
11979                 return -EINVAL;
11980         if ((wol->wolopts & WAKE_MAGIC) &&
11981             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11982                 return -EINVAL;
11983
11984         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11985
11986         spin_lock_bh(&tp->lock);
11987         if (device_may_wakeup(dp))
11988                 tg3_flag_set(tp, WOL_ENABLE);
11989         else
11990                 tg3_flag_clear(tp, WOL_ENABLE);
11991         spin_unlock_bh(&tp->lock);
11992
11993         return 0;
11994 }
11995
11996 static u32 tg3_get_msglevel(struct net_device *dev)
11997 {
11998         struct tg3 *tp = netdev_priv(dev);
11999         return tp->msg_enable;
12000 }
12001
12002 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12003 {
12004         struct tg3 *tp = netdev_priv(dev);
12005         tp->msg_enable = value;
12006 }
12007
/* ethtool .nway_reset: restart autonegotiation.
 *
 * Returns -EAGAIN if the interface is down (or the phylib PHY is not
 * yet connected), -EINVAL for PHY-SERDES devices or when the PHY has
 * autoneg disabled and parallel detection is not in use, 0 on success.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		/* phylib manages the PHY: let it restart autoneg. */
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* Dummy read; the second read below supplies the value
		 * actually used.  NOTE(review): presumably flushes a stale
		 * latched value from the PHY -- confirm before removing.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			/* Restart autoneg, force-enabling it in case we
			 * got here via parallel detection.
			 */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
12043
12044 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12045 {
12046         struct tg3 *tp = netdev_priv(dev);
12047
12048         ering->rx_max_pending = tp->rx_std_ring_mask;
12049         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12050                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12051         else
12052                 ering->rx_jumbo_max_pending = 0;
12053
12054         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12055
12056         ering->rx_pending = tp->rx_pending;
12057         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12058                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12059         else
12060                 ering->rx_jumbo_pending = 0;
12061
12062         ering->tx_pending = tp->napi[0].tx_pending;
12063 }
12064
12065 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12066 {
12067         struct tg3 *tp = netdev_priv(dev);
12068         int i, irq_sync = 0, err = 0;
12069
12070         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12071             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12072             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12073             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12074             (tg3_flag(tp, TSO_BUG) &&
12075              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12076                 return -EINVAL;
12077
12078         if (netif_running(dev)) {
12079                 tg3_phy_stop(tp);
12080                 tg3_netif_stop(tp);
12081                 irq_sync = 1;
12082         }
12083
12084         tg3_full_lock(tp, irq_sync);
12085
12086         tp->rx_pending = ering->rx_pending;
12087
12088         if (tg3_flag(tp, MAX_RXPEND_64) &&
12089             tp->rx_pending > 63)
12090                 tp->rx_pending = 63;
12091         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12092
12093         for (i = 0; i < tp->irq_max; i++)
12094                 tp->napi[i].tx_pending = ering->tx_pending;
12095
12096         if (netif_running(dev)) {
12097                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12098                 err = tg3_restart_hw(tp, false);
12099                 if (!err)
12100                         tg3_netif_start(tp);
12101         }
12102
12103         tg3_full_unlock(tp);
12104
12105         if (irq_sync && !err)
12106                 tg3_phy_start(tp);
12107
12108         return err;
12109 }
12110
12111 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12112 {
12113         struct tg3 *tp = netdev_priv(dev);
12114
12115         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12116
12117         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12118                 epause->rx_pause = 1;
12119         else
12120                 epause->rx_pause = 0;
12121
12122         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12123                 epause->tx_pause = 1;
12124         else
12125                 epause->tx_pause = 0;
12126 }
12127
/* ethtool .set_pauseparam: configure RX/TX flow control and whether it
 * is autonegotiated.  With phylib (USE_PHYLIB) the change is applied by
 * updating the PHY's pause advertisement and, when needed, restarting
 * autoneg; otherwise the MAC is halted and restarted with the new
 * settings.  Returns 0 or a negative errno.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE)
		tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* The PHY must support pause at all, and asymmetric pause
		 * if rx and tx are requested to differ.
		 */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Translate the rx/tx request into flowctrl bits and the
		 * corresponding pause advertisement bits.
		 */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			/* PHY attached: update its advertisement only if
			 * the pause bits actually changed.
			 */
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* Not connected yet: just record the advertisement
			 * for when the PHY is brought up.
			 */
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		/* Legacy (non-phylib) path: apply the settings directly
		 * and restart the hardware if the interface is up.
		 */
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, false);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	/* Remember that the user explicitly configured flow control. */
	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	return err;
}
12234
12235 static int tg3_get_sset_count(struct net_device *dev, int sset)
12236 {
12237         switch (sset) {
12238         case ETH_SS_TEST:
12239                 return TG3_NUM_TEST;
12240         case ETH_SS_STATS:
12241                 return TG3_NUM_STATS;
12242         default:
12243                 return -EOPNOTSUPP;
12244         }
12245 }
12246
12247 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12248                          u32 *rules __always_unused)
12249 {
12250         struct tg3 *tp = netdev_priv(dev);
12251
12252         if (!tg3_flag(tp, SUPPORT_MSIX))
12253                 return -EOPNOTSUPP;
12254
12255         switch (info->cmd) {
12256         case ETHTOOL_GRXRINGS:
12257                 if (netif_running(tp->dev))
12258                         info->data = tp->rxq_cnt;
12259                 else {
12260                         info->data = num_online_cpus();
12261                         if (info->data > TG3_RSS_MAX_NUM_QS)
12262                                 info->data = TG3_RSS_MAX_NUM_QS;
12263                 }
12264
12265                 /* The first interrupt vector only
12266                  * handles link interrupts.
12267                  */
12268                 info->data -= 1;
12269                 return 0;
12270
12271         default:
12272                 return -EOPNOTSUPP;
12273         }
12274 }
12275
12276 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12277 {
12278         u32 size = 0;
12279         struct tg3 *tp = netdev_priv(dev);
12280
12281         if (tg3_flag(tp, SUPPORT_MSIX))
12282                 size = TG3_RSS_INDIR_TBL_SIZE;
12283
12284         return size;
12285 }
12286
12287 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12288 {
12289         struct tg3 *tp = netdev_priv(dev);
12290         int i;
12291
12292         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12293                 indir[i] = tp->rss_ind_tbl[i];
12294
12295         return 0;
12296 }
12297
12298 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12299 {
12300         struct tg3 *tp = netdev_priv(dev);
12301         size_t i;
12302
12303         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12304                 tp->rss_ind_tbl[i] = indir[i];
12305
12306         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12307                 return 0;
12308
12309         /* It is legal to write the indirection
12310          * table while the device is running.
12311          */
12312         tg3_full_lock(tp, 0);
12313         tg3_rss_write_indir_tbl(tp);
12314         tg3_full_unlock(tp);
12315
12316         return 0;
12317 }
12318
12319 static void tg3_get_channels(struct net_device *dev,
12320                              struct ethtool_channels *channel)
12321 {
12322         struct tg3 *tp = netdev_priv(dev);
12323         u32 deflt_qs = netif_get_num_default_rss_queues();
12324
12325         channel->max_rx = tp->rxq_max;
12326         channel->max_tx = tp->txq_max;
12327
12328         if (netif_running(dev)) {
12329                 channel->rx_count = tp->rxq_cnt;
12330                 channel->tx_count = tp->txq_cnt;
12331         } else {
12332                 if (tp->rxq_req)
12333                         channel->rx_count = tp->rxq_req;
12334                 else
12335                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12336
12337                 if (tp->txq_req)
12338                         channel->tx_count = tp->txq_req;
12339                 else
12340                         channel->tx_count = min(deflt_qs, tp->txq_max);
12341         }
12342 }
12343
12344 static int tg3_set_channels(struct net_device *dev,
12345                             struct ethtool_channels *channel)
12346 {
12347         struct tg3 *tp = netdev_priv(dev);
12348
12349         if (!tg3_flag(tp, SUPPORT_MSIX))
12350                 return -EOPNOTSUPP;
12351
12352         if (channel->rx_count > tp->rxq_max ||
12353             channel->tx_count > tp->txq_max)
12354                 return -EINVAL;
12355
12356         tp->rxq_req = channel->rx_count;
12357         tp->txq_req = channel->tx_count;
12358
12359         if (!netif_running(dev))
12360                 return 0;
12361
12362         tg3_stop(tp);
12363
12364         tg3_carrier_off(tp);
12365
12366         tg3_start(tp, true, false, false);
12367
12368         return 0;
12369 }
12370
12371 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12372 {
12373         switch (stringset) {
12374         case ETH_SS_STATS:
12375                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12376                 break;
12377         case ETH_SS_TEST:
12378                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12379                 break;
12380         default:
12381                 WARN_ON(1);     /* we need a WARN() */
12382                 break;
12383         }
12384 }
12385
12386 static int tg3_set_phys_id(struct net_device *dev,
12387                             enum ethtool_phys_id_state state)
12388 {
12389         struct tg3 *tp = netdev_priv(dev);
12390
12391         if (!netif_running(tp->dev))
12392                 return -EAGAIN;
12393
12394         switch (state) {
12395         case ETHTOOL_ID_ACTIVE:
12396                 return 1;       /* cycle on/off once per second */
12397
12398         case ETHTOOL_ID_ON:
12399                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12400                      LED_CTRL_1000MBPS_ON |
12401                      LED_CTRL_100MBPS_ON |
12402                      LED_CTRL_10MBPS_ON |
12403                      LED_CTRL_TRAFFIC_OVERRIDE |
12404                      LED_CTRL_TRAFFIC_BLINK |
12405                      LED_CTRL_TRAFFIC_LED);
12406                 break;
12407
12408         case ETHTOOL_ID_OFF:
12409                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12410                      LED_CTRL_TRAFFIC_OVERRIDE);
12411                 break;
12412
12413         case ETHTOOL_ID_INACTIVE:
12414                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12415                 break;
12416         }
12417
12418         return 0;
12419 }
12420
12421 static void tg3_get_ethtool_stats(struct net_device *dev,
12422                                    struct ethtool_stats *estats, u64 *tmp_stats)
12423 {
12424         struct tg3 *tp = netdev_priv(dev);
12425
12426         if (tp->hw_stats)
12427                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12428         else
12429                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12430 }
12431
/* Read the device's VPD (Vital Product Data) block into a freshly
 * kmalloc'd buffer.  EEPROM-style NVRAM is read directly (looking for
 * an extended-VPD directory entry first, falling back to the fixed VPD
 * area); other NVRAM types are read via PCI config-space VPD access.
 *
 * Returns the buffer (caller must kfree) with its length stored in
 * *vpdlen, or NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Scan the NVRAM directory for an extended-VPD entry. */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Found one: length is stored in 4-byte words;
			 * the data location follows the entry.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		/* No extended VPD: use the fixed VPD area. */
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		/* Non-EEPROM NVRAM: read through PCI config space,
		 * allowing up to 3 attempts; transient errors
		 * (-ETIMEDOUT/-EINTR) count as a zero-length read.
		 */
		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
12507
12508 #define NVRAM_TEST_SIZE 0x100
12509 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12510 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12511 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12512 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12513 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12514 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12515 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12516 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12517
/* Ethtool NVRAM self-test: read back the NVRAM image and verify its
 * integrity.  Three image formats are recognized by their magic value:
 *   - legacy EEPROM images: CRC over the bootstrap and manufacturing
 *     blocks, then the VPD read-only section checksum;
 *   - selfboot format-1 firmware images: byte-sum checksum;
 *   - selfboot "HW" images: per-byte odd-parity check.
 * Returns 0 on success, -EIO on a bad checksum or read failure,
 * -ENOMEM on allocation failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	/* Nothing to test if this chip has no NVRAM attached. */
	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine how much of the image must be read for the check. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			/* Selfboot format 1: size varies per revision. */
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			/* Unknown selfboot format: nothing to verify. */
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Pull the image into memory, preserving NVRAM byte order. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		/* The byte sum over the covered range must be zero. */
		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  The bytes
		 * at offsets 0, 8, 16 and 17 hold packed parity bits for
		 * the remaining data bytes.
		 */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* 7 parity bits packed in this byte. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				/* 6 parity bits in this byte ... */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				/* ... plus 8 more in the next byte. */
				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte together with its parity bit must carry
		 * an odd number of set bits (odd parity).
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Finally verify the VPD RO-section checksum, if one is present. */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* Bytes 0..j (including the stored checksum byte)
			 * must sum to zero.
			 */
			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
12696
12697 #define TG3_SERDES_TIMEOUT_SEC  2
12698 #define TG3_COPPER_TIMEOUT_SEC  6
12699
12700 static int tg3_test_link(struct tg3 *tp)
12701 {
12702         int i, max;
12703
12704         if (!netif_running(tp->dev))
12705                 return -ENODEV;
12706
12707         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12708                 max = TG3_SERDES_TIMEOUT_SEC;
12709         else
12710                 max = TG3_COPPER_TIMEOUT_SEC;
12711
12712         for (i = 0; i < max; i++) {
12713                 if (tp->link_up)
12714                         return 0;
12715
12716                 if (msleep_interruptible(1000))
12717                         break;
12718         }
12719
12720         return -EIO;
12721 }
12722
12723 /* Only test the commonly used registers */
12724 static int tg3_test_registers(struct tg3 *tp)
12725 {
12726         int i, is_5705, is_5750;
12727         u32 offset, read_mask, write_mask, val, save_val, read_val;
12728         static struct {
12729                 u16 offset;
12730                 u16 flags;
12731 #define TG3_FL_5705     0x1
12732 #define TG3_FL_NOT_5705 0x2
12733 #define TG3_FL_NOT_5788 0x4
12734 #define TG3_FL_NOT_5750 0x8
12735                 u32 read_mask;
12736                 u32 write_mask;
12737         } reg_tbl[] = {
12738                 /* MAC Control Registers */
12739                 { MAC_MODE, TG3_FL_NOT_5705,
12740                         0x00000000, 0x00ef6f8c },
12741                 { MAC_MODE, TG3_FL_5705,
12742                         0x00000000, 0x01ef6b8c },
12743                 { MAC_STATUS, TG3_FL_NOT_5705,
12744                         0x03800107, 0x00000000 },
12745                 { MAC_STATUS, TG3_FL_5705,
12746                         0x03800100, 0x00000000 },
12747                 { MAC_ADDR_0_HIGH, 0x0000,
12748                         0x00000000, 0x0000ffff },
12749                 { MAC_ADDR_0_LOW, 0x0000,
12750                         0x00000000, 0xffffffff },
12751                 { MAC_RX_MTU_SIZE, 0x0000,
12752                         0x00000000, 0x0000ffff },
12753                 { MAC_TX_MODE, 0x0000,
12754                         0x00000000, 0x00000070 },
12755                 { MAC_TX_LENGTHS, 0x0000,
12756                         0x00000000, 0x00003fff },
12757                 { MAC_RX_MODE, TG3_FL_NOT_5705,
12758                         0x00000000, 0x000007fc },
12759                 { MAC_RX_MODE, TG3_FL_5705,
12760                         0x00000000, 0x000007dc },
12761                 { MAC_HASH_REG_0, 0x0000,
12762                         0x00000000, 0xffffffff },
12763                 { MAC_HASH_REG_1, 0x0000,
12764                         0x00000000, 0xffffffff },
12765                 { MAC_HASH_REG_2, 0x0000,
12766                         0x00000000, 0xffffffff },
12767                 { MAC_HASH_REG_3, 0x0000,
12768                         0x00000000, 0xffffffff },
12769
12770                 /* Receive Data and Receive BD Initiator Control Registers. */
12771                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12772                         0x00000000, 0xffffffff },
12773                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12774                         0x00000000, 0xffffffff },
12775                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12776                         0x00000000, 0x00000003 },
12777                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12778                         0x00000000, 0xffffffff },
12779                 { RCVDBDI_STD_BD+0, 0x0000,
12780                         0x00000000, 0xffffffff },
12781                 { RCVDBDI_STD_BD+4, 0x0000,
12782                         0x00000000, 0xffffffff },
12783                 { RCVDBDI_STD_BD+8, 0x0000,
12784                         0x00000000, 0xffff0002 },
12785                 { RCVDBDI_STD_BD+0xc, 0x0000,
12786                         0x00000000, 0xffffffff },
12787
12788                 /* Receive BD Initiator Control Registers. */
12789                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12790                         0x00000000, 0xffffffff },
12791                 { RCVBDI_STD_THRESH, TG3_FL_5705,
12792                         0x00000000, 0x000003ff },
12793                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12794                         0x00000000, 0xffffffff },
12795
12796                 /* Host Coalescing Control Registers. */
12797                 { HOSTCC_MODE, TG3_FL_NOT_5705,
12798                         0x00000000, 0x00000004 },
12799                 { HOSTCC_MODE, TG3_FL_5705,
12800                         0x00000000, 0x000000f6 },
12801                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12802                         0x00000000, 0xffffffff },
12803                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12804                         0x00000000, 0x000003ff },
12805                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12806                         0x00000000, 0xffffffff },
12807                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12808                         0x00000000, 0x000003ff },
12809                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12810                         0x00000000, 0xffffffff },
12811                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12812                         0x00000000, 0x000000ff },
12813                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12814                         0x00000000, 0xffffffff },
12815                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12816                         0x00000000, 0x000000ff },
12817                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12818                         0x00000000, 0xffffffff },
12819                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12820                         0x00000000, 0xffffffff },
12821                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12822                         0x00000000, 0xffffffff },
12823                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12824                         0x00000000, 0x000000ff },
12825                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12826                         0x00000000, 0xffffffff },
12827                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12828                         0x00000000, 0x000000ff },
12829                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12830                         0x00000000, 0xffffffff },
12831                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12832                         0x00000000, 0xffffffff },
12833                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12834                         0x00000000, 0xffffffff },
12835                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12836                         0x00000000, 0xffffffff },
12837                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12838                         0x00000000, 0xffffffff },
12839                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12840                         0xffffffff, 0x00000000 },
12841                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12842                         0xffffffff, 0x00000000 },
12843
12844                 /* Buffer Manager Control Registers. */
12845                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12846                         0x00000000, 0x007fff80 },
12847                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12848                         0x00000000, 0x007fffff },
12849                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12850                         0x00000000, 0x0000003f },
12851                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12852                         0x00000000, 0x000001ff },
12853                 { BUFMGR_MB_HIGH_WATER, 0x0000,
12854                         0x00000000, 0x000001ff },
12855                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12856                         0xffffffff, 0x00000000 },
12857                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12858                         0xffffffff, 0x00000000 },
12859
12860                 /* Mailbox Registers */
12861                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12862                         0x00000000, 0x000001ff },
12863                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12864                         0x00000000, 0x000001ff },
12865                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12866                         0x00000000, 0x000007ff },
12867                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12868                         0x00000000, 0x000001ff },
12869
12870                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12871         };
12872
12873         is_5705 = is_5750 = 0;
12874         if (tg3_flag(tp, 5705_PLUS)) {
12875                 is_5705 = 1;
12876                 if (tg3_flag(tp, 5750_PLUS))
12877                         is_5750 = 1;
12878         }
12879
12880         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12881                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12882                         continue;
12883
12884                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12885                         continue;
12886
12887                 if (tg3_flag(tp, IS_5788) &&
12888                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
12889                         continue;
12890
12891                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12892                         continue;
12893
12894                 offset = (u32) reg_tbl[i].offset;
12895                 read_mask = reg_tbl[i].read_mask;
12896                 write_mask = reg_tbl[i].write_mask;
12897
12898                 /* Save the original register content */
12899                 save_val = tr32(offset);
12900
12901                 /* Determine the read-only value. */
12902                 read_val = save_val & read_mask;
12903
12904                 /* Write zero to the register, then make sure the read-only bits
12905                  * are not changed and the read/write bits are all zeros.
12906                  */
12907                 tw32(offset, 0);
12908
12909                 val = tr32(offset);
12910
12911                 /* Test the read-only and read/write bits. */
12912                 if (((val & read_mask) != read_val) || (val & write_mask))
12913                         goto out;
12914
12915                 /* Write ones to all the bits defined by RdMask and WrMask, then
12916                  * make sure the read-only bits are not changed and the
12917                  * read/write bits are all ones.
12918                  */
12919                 tw32(offset, read_mask | write_mask);
12920
12921                 val = tr32(offset);
12922
12923                 /* Test the read-only bits. */
12924                 if ((val & read_mask) != read_val)
12925                         goto out;
12926
12927                 /* Test the read/write bits. */
12928                 if ((val & write_mask) != write_mask)
12929                         goto out;
12930
12931                 tw32(offset, save_val);
12932         }
12933
12934         return 0;
12935
12936 out:
12937         if (netif_msg_hw(tp))
12938                 netdev_err(tp->dev,
12939                            "Register test failed at offset %x\n", offset);
12940         tw32(offset, save_val);
12941         return -EIO;
12942 }
12943
12944 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12945 {
12946         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12947         int i;
12948         u32 j;
12949
12950         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12951                 for (j = 0; j < len; j += 4) {
12952                         u32 val;
12953
12954                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12955                         tg3_read_mem(tp, offset + j, &val);
12956                         if (val != test_pattern[i])
12957                                 return -EIO;
12958                 }
12959         }
12960         return 0;
12961 }
12962
12963 static int tg3_test_memory(struct tg3 *tp)
12964 {
12965         static struct mem_entry {
12966                 u32 offset;
12967                 u32 len;
12968         } mem_tbl_570x[] = {
12969                 { 0x00000000, 0x00b50},
12970                 { 0x00002000, 0x1c000},
12971                 { 0xffffffff, 0x00000}
12972         }, mem_tbl_5705[] = {
12973                 { 0x00000100, 0x0000c},
12974                 { 0x00000200, 0x00008},
12975                 { 0x00004000, 0x00800},
12976                 { 0x00006000, 0x01000},
12977                 { 0x00008000, 0x02000},
12978                 { 0x00010000, 0x0e000},
12979                 { 0xffffffff, 0x00000}
12980         }, mem_tbl_5755[] = {
12981                 { 0x00000200, 0x00008},
12982                 { 0x00004000, 0x00800},
12983                 { 0x00006000, 0x00800},
12984                 { 0x00008000, 0x02000},
12985                 { 0x00010000, 0x0c000},
12986                 { 0xffffffff, 0x00000}
12987         }, mem_tbl_5906[] = {
12988                 { 0x00000200, 0x00008},
12989                 { 0x00004000, 0x00400},
12990                 { 0x00006000, 0x00400},
12991                 { 0x00008000, 0x01000},
12992                 { 0x00010000, 0x01000},
12993                 { 0xffffffff, 0x00000}
12994         }, mem_tbl_5717[] = {
12995                 { 0x00000200, 0x00008},
12996                 { 0x00010000, 0x0a000},
12997                 { 0x00020000, 0x13c00},
12998                 { 0xffffffff, 0x00000}
12999         }, mem_tbl_57765[] = {
13000                 { 0x00000200, 0x00008},
13001                 { 0x00004000, 0x00800},
13002                 { 0x00006000, 0x09800},
13003                 { 0x00010000, 0x0a000},
13004                 { 0xffffffff, 0x00000}
13005         };
13006         struct mem_entry *mem_tbl;
13007         int err = 0;
13008         int i;
13009
13010         if (tg3_flag(tp, 5717_PLUS))
13011                 mem_tbl = mem_tbl_5717;
13012         else if (tg3_flag(tp, 57765_CLASS) ||
13013                  tg3_asic_rev(tp) == ASIC_REV_5762)
13014                 mem_tbl = mem_tbl_57765;
13015         else if (tg3_flag(tp, 5755_PLUS))
13016                 mem_tbl = mem_tbl_5755;
13017         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13018                 mem_tbl = mem_tbl_5906;
13019         else if (tg3_flag(tp, 5705_PLUS))
13020                 mem_tbl = mem_tbl_5705;
13021         else
13022                 mem_tbl = mem_tbl_570x;
13023
13024         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13025                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13026                 if (err)
13027                         break;
13028         }
13029
13030         return err;
13031 }
13032
/* Parameters for the canned TSO loopback frame below. */
#define TG3_TSO_MSS             500

#define TG3_TSO_IP_HDR_LEN      20
#define TG3_TSO_TCP_HDR_LEN     20
#define TG3_TSO_TCP_OPT_LEN     12

/* Template frame body used by the TSO loopback test.  It is copied into
 * the transmit buffer immediately after the two MAC addresses, so it
 * begins with the ethertype (0x0800, IPv4), followed by a 20-byte IPv4
 * header (0x45 = version 4, IHL 5), a 20-byte TCP header, and 12 bytes
 * of TCP options (NOP, NOP, timestamp: kind 8, length 10).
 * tg3_run_loopback() patches the IP total-length field, and zeroes the
 * TCP checksum on chips with full TSO offload, at run time.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
13055
13056 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13057 {
13058         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13059         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13060         u32 budget;
13061         struct sk_buff *skb;
13062         u8 *tx_data, *rx_data;
13063         dma_addr_t map;
13064         int num_pkts, tx_len, rx_len, i, err;
13065         struct tg3_rx_buffer_desc *desc;
13066         struct tg3_napi *tnapi, *rnapi;
13067         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13068
13069         tnapi = &tp->napi[0];
13070         rnapi = &tp->napi[0];
13071         if (tp->irq_cnt > 1) {
13072                 if (tg3_flag(tp, ENABLE_RSS))
13073                         rnapi = &tp->napi[1];
13074                 if (tg3_flag(tp, ENABLE_TSS))
13075                         tnapi = &tp->napi[1];
13076         }
13077         coal_now = tnapi->coal_now | rnapi->coal_now;
13078
13079         err = -EIO;
13080
13081         tx_len = pktsz;
13082         skb = netdev_alloc_skb(tp->dev, tx_len);
13083         if (!skb)
13084                 return -ENOMEM;
13085
13086         tx_data = skb_put(skb, tx_len);
13087         memcpy(tx_data, tp->dev->dev_addr, 6);
13088         memset(tx_data + 6, 0x0, 8);
13089
13090         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13091
13092         if (tso_loopback) {
13093                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13094
13095                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13096                               TG3_TSO_TCP_OPT_LEN;
13097
13098                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13099                        sizeof(tg3_tso_header));
13100                 mss = TG3_TSO_MSS;
13101
13102                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13103                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13104
13105                 /* Set the total length field in the IP header */
13106                 iph->tot_len = htons((u16)(mss + hdr_len));
13107
13108                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13109                               TXD_FLAG_CPU_POST_DMA);
13110
13111                 if (tg3_flag(tp, HW_TSO_1) ||
13112                     tg3_flag(tp, HW_TSO_2) ||
13113                     tg3_flag(tp, HW_TSO_3)) {
13114                         struct tcphdr *th;
13115                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13116                         th = (struct tcphdr *)&tx_data[val];
13117                         th->check = 0;
13118                 } else
13119                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13120
13121                 if (tg3_flag(tp, HW_TSO_3)) {
13122                         mss |= (hdr_len & 0xc) << 12;
13123                         if (hdr_len & 0x10)
13124                                 base_flags |= 0x00000010;
13125                         base_flags |= (hdr_len & 0x3e0) << 5;
13126                 } else if (tg3_flag(tp, HW_TSO_2))
13127                         mss |= hdr_len << 9;
13128                 else if (tg3_flag(tp, HW_TSO_1) ||
13129                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13130                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13131                 } else {
13132                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13133                 }
13134
13135                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13136         } else {
13137                 num_pkts = 1;
13138                 data_off = ETH_HLEN;
13139
13140                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13141                     tx_len > VLAN_ETH_FRAME_LEN)
13142                         base_flags |= TXD_FLAG_JMB_PKT;
13143         }
13144
13145         for (i = data_off; i < tx_len; i++)
13146                 tx_data[i] = (u8) (i & 0xff);
13147
13148         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13149         if (pci_dma_mapping_error(tp->pdev, map)) {
13150                 dev_kfree_skb(skb);
13151                 return -EIO;
13152         }
13153
13154         val = tnapi->tx_prod;
13155         tnapi->tx_buffers[val].skb = skb;
13156         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13157
13158         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13159                rnapi->coal_now);
13160
13161         udelay(10);
13162
13163         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13164
13165         budget = tg3_tx_avail(tnapi);
13166         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13167                             base_flags | TXD_FLAG_END, mss, 0)) {
13168                 tnapi->tx_buffers[val].skb = NULL;
13169                 dev_kfree_skb(skb);
13170                 return -EIO;
13171         }
13172
13173         tnapi->tx_prod++;
13174
13175         /* Sync BD data before updating mailbox */
13176         wmb();
13177
13178         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13179         tr32_mailbox(tnapi->prodmbox);
13180
13181         udelay(10);
13182
13183         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13184         for (i = 0; i < 35; i++) {
13185                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13186                        coal_now);
13187
13188                 udelay(10);
13189
13190                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13191                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13192                 if ((tx_idx == tnapi->tx_prod) &&
13193                     (rx_idx == (rx_start_idx + num_pkts)))
13194                         break;
13195         }
13196
13197         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13198         dev_kfree_skb(skb);
13199
13200         if (tx_idx != tnapi->tx_prod)
13201                 goto out;
13202
13203         if (rx_idx != rx_start_idx + num_pkts)
13204                 goto out;
13205
13206         val = data_off;
13207         while (rx_idx != rx_start_idx) {
13208                 desc = &rnapi->rx_rcb[rx_start_idx++];
13209                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13210                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13211
13212                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13213                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13214                         goto out;
13215
13216                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13217                          - ETH_FCS_LEN;
13218
13219                 if (!tso_loopback) {
13220                         if (rx_len != tx_len)
13221                                 goto out;
13222
13223                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13224                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13225                                         goto out;
13226                         } else {
13227                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13228                                         goto out;
13229                         }
13230                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13231                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13232                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13233                         goto out;
13234                 }
13235
13236                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13237                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13238                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13239                                              mapping);
13240                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13241                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13242                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13243                                              mapping);
13244                 } else
13245                         goto out;
13246
13247                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13248                                             PCI_DMA_FROMDEVICE);
13249
13250                 rx_data += TG3_RX_OFFSET(tp);
13251                 for (i = data_off; i < rx_len; i++, val++) {
13252                         if (*(rx_data + i) != (u8) (val & 0xff))
13253                                 goto out;
13254                 }
13255         }
13256
13257         err = 0;
13258
13259         /* tg3_free_rings will unmap and free the rx_data */
13260 out:
13261         return err;
13262 }
13263
13264 #define TG3_STD_LOOPBACK_FAILED         1
13265 #define TG3_JMB_LOOPBACK_FAILED         2
13266 #define TG3_TSO_LOOPBACK_FAILED         4
13267 #define TG3_LOOPBACK_FAILED \
13268         (TG3_STD_LOOPBACK_FAILED | \
13269          TG3_JMB_LOOPBACK_FAILED | \
13270          TG3_TSO_LOOPBACK_FAILED)
13271
/* tg3_test_loopback - run the MAC, PHY and (optionally) external
 * loopback self-tests and record per-test failure bits in @data.
 *
 * @data:       ethtool self-test result array; the TG3_*_LOOPB_TEST
 *              slots are OR-ed with TG3_{STD,JMB,TSO}_LOOPBACK_FAILED
 *              bits (caller is expected to have zeroed the array).
 * @do_extlpbk: also run the external-loopback variant.
 *
 * Returns 0 if every attempted loopback passed, -EIO otherwise.
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	/* Temporarily hide EEE capability for the duration of the test;
	 * restored at 'done'.
	 */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	/* Interface down or reset failure: mark every loopback as failed. */
	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, true);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	/* PHY loopback (internal, then optionally external) -- only when
	 * the PHY is directly managed by this driver.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	/* Any recorded failure bit means the overall test failed. */
	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
13386
/* tg3_self_test - ethtool self-test entry point (.self_test).
 *
 * Runs the NVRAM, link, register, memory, loopback and interrupt tests,
 * recording a per-test pass/fail flag in @data and setting
 * ETH_TEST_FL_FAILED in @etest on any failure.  Offline tests halt and
 * later restart the hardware, so they are bracketed by tg3_netif_stop /
 * tg3_netif_start and the full lock.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* Power the device up first if it is in a low-power state; if
	 * that fails, report every test as failed and bail out.
	 */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		if (tg3_power_up(tp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
			return;
		}
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	/* Skip the link test when external loopback is requested (the
	 * cable is looped, not connected to a partner).
	 */
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		/* tg3_test_loopback() fills in the per-loopback slots of
		 * data[] itself.
		 */
		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* Interrupt test needs interrupts, so run it unlocked. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Restore the hardware to its pre-test operating state. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, true);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
13475
13476 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13477                               struct ifreq *ifr, int cmd)
13478 {
13479         struct tg3 *tp = netdev_priv(dev);
13480         struct hwtstamp_config stmpconf;
13481
13482         if (!tg3_flag(tp, PTP_CAPABLE))
13483                 return -EINVAL;
13484
13485         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13486                 return -EFAULT;
13487
13488         if (stmpconf.flags)
13489                 return -EINVAL;
13490
13491         switch (stmpconf.tx_type) {
13492         case HWTSTAMP_TX_ON:
13493                 tg3_flag_set(tp, TX_TSTAMP_EN);
13494                 break;
13495         case HWTSTAMP_TX_OFF:
13496                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13497                 break;
13498         default:
13499                 return -ERANGE;
13500         }
13501
13502         switch (stmpconf.rx_filter) {
13503         case HWTSTAMP_FILTER_NONE:
13504                 tp->rxptpctl = 0;
13505                 break;
13506         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13507                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13508                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13509                 break;
13510         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13511                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13512                                TG3_RX_PTP_CTL_SYNC_EVNT;
13513                 break;
13514         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13515                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13516                                TG3_RX_PTP_CTL_DELAY_REQ;
13517                 break;
13518         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13519                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13520                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13521                 break;
13522         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13523                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13524                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13525                 break;
13526         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13527                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13528                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13529                 break;
13530         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13531                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13532                                TG3_RX_PTP_CTL_SYNC_EVNT;
13533                 break;
13534         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13535                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13536                                TG3_RX_PTP_CTL_SYNC_EVNT;
13537                 break;
13538         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13539                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13540                                TG3_RX_PTP_CTL_SYNC_EVNT;
13541                 break;
13542         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13543                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13544                                TG3_RX_PTP_CTL_DELAY_REQ;
13545                 break;
13546         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13547                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13548                                TG3_RX_PTP_CTL_DELAY_REQ;
13549                 break;
13550         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13551                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13552                                TG3_RX_PTP_CTL_DELAY_REQ;
13553                 break;
13554         default:
13555                 return -ERANGE;
13556         }
13557
13558         if (netif_running(dev) && tp->rxptpctl)
13559                 tw32(TG3_RX_PTP_CTL,
13560                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13561
13562         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13563                 -EFAULT : 0;
13564 }
13565
/* Net device ioctl dispatcher.  MII register access is either forwarded
 * to phylib (when an external PHY driver is attached) or performed
 * directly through the internal MDIO helpers under tp->lock.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		/* Cannot proxy to the PHY until it has been connected. */
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fall through: SIOCGMIIPHY also returns the register value */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		/* Serialize MDIO access against the rest of the driver. */
		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_ioctl(dev, ifr, cmd);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
13627
13628 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13629 {
13630         struct tg3 *tp = netdev_priv(dev);
13631
13632         memcpy(ec, &tp->coal, sizeof(*ec));
13633         return 0;
13634 }
13635
13636 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13637 {
13638         struct tg3 *tp = netdev_priv(dev);
13639         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13640         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13641
13642         if (!tg3_flag(tp, 5705_PLUS)) {
13643                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13644                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13645                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13646                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13647         }
13648
13649         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13650             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13651             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13652             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13653             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13654             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13655             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13656             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13657             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13658             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13659                 return -EINVAL;
13660
13661         /* No rx interrupts will be generated if both are zero */
13662         if ((ec->rx_coalesce_usecs == 0) &&
13663             (ec->rx_max_coalesced_frames == 0))
13664                 return -EINVAL;
13665
13666         /* No tx interrupts will be generated if both are zero */
13667         if ((ec->tx_coalesce_usecs == 0) &&
13668             (ec->tx_max_coalesced_frames == 0))
13669                 return -EINVAL;
13670
13671         /* Only copy relevant parameters, ignore all others. */
13672         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13673         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13674         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13675         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13676         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13677         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13678         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13679         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13680         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13681
13682         if (netif_running(dev)) {
13683                 tg3_full_lock(tp, 0);
13684                 __tg3_set_coalesce(tp, &tp->coal);
13685                 tg3_full_unlock(tp);
13686         }
13687         return 0;
13688 }
13689
/* ethtool set_eee handler.  Validate and cache the requested Energy
 * Efficient Ethernet settings, then apply them to the hardware if the
 * interface is up.
 */
static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev, "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	/* The advertised mask is managed by the driver; it may not be
	 * changed directly through this interface.
	 */
	if (edata->advertised != tp->eee.advertised) {
		netdev_warn(tp->dev,
			    "Direct manipulation of EEE advertisement is not supported\n");
		return -EINVAL;
	}

	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximal Tx Lpi timer supported is %#x(u)\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
		return -EINVAL;
	}

	tp->eee = *edata;

	/* Record the user override and warn about a possible management
	 * link flap when the new settings are applied.
	 */
	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(tp->dev)) {
		tg3_full_lock(tp, 0);
		tg3_setup_eee(tp);
		/* PHY reset so the new EEE configuration takes effect. */
		tg3_phy_reset(tp);
		tg3_full_unlock(tp);
	}

	return 0;
}
13726
13727 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13728 {
13729         struct tg3 *tp = netdev_priv(dev);
13730
13731         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13732                 netdev_warn(tp->dev,
13733                             "Board does not support EEE!\n");
13734                 return -EOPNOTSUPP;
13735         }
13736
13737         *edata = tp->eee;
13738         return 0;
13739 }
13740
/* ethtool entry points supported by this driver. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
};
13777
13778 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13779                                                 struct rtnl_link_stats64 *stats)
13780 {
13781         struct tg3 *tp = netdev_priv(dev);
13782
13783         spin_lock_bh(&tp->lock);
13784         if (!tp->hw_stats) {
13785                 spin_unlock_bh(&tp->lock);
13786                 return &tp->net_stats_prev;
13787         }
13788
13789         tg3_get_nstats(tp, stats);
13790         spin_unlock_bh(&tp->lock);
13791
13792         return stats;
13793 }
13794
/* ndo_set_rx_mode handler: reprogram RX filtering under the full lock.
 * Nothing to do while the interface is down; the mode is applied when
 * the device is brought up.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
13806
13807 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13808                                int new_mtu)
13809 {
13810         dev->mtu = new_mtu;
13811
13812         if (new_mtu > ETH_DATA_LEN) {
13813                 if (tg3_flag(tp, 5780_CLASS)) {
13814                         netdev_update_features(dev);
13815                         tg3_flag_clear(tp, TSO_CAPABLE);
13816                 } else {
13817                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
13818                 }
13819         } else {
13820                 if (tg3_flag(tp, 5780_CLASS)) {
13821                         tg3_flag_set(tp, TSO_CAPABLE);
13822                         netdev_update_features(dev);
13823                 }
13824                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13825         }
13826 }
13827
/* ndo_change_mtu handler.  If the device is down the new MTU is only
 * recorded; otherwise the chip is halted and re-initialized so the
 * ring configuration matches the new MTU.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	/* Only restart the data path and PHY if the restart succeeded. */
	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
13873
/* Net device operations implemented by this driver. */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
13891
13892 static void tg3_get_eeprom_size(struct tg3 *tp)
13893 {
13894         u32 cursize, val, magic;
13895
13896         tp->nvram_size = EEPROM_CHIP_SIZE;
13897
13898         if (tg3_nvram_read(tp, 0, &magic) != 0)
13899                 return;
13900
13901         if ((magic != TG3_EEPROM_MAGIC) &&
13902             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13903             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13904                 return;
13905
13906         /*
13907          * Size the chip by reading offsets at increasing powers of two.
13908          * When we encounter our validation signature, we know the addressing
13909          * has wrapped around, and thus have our chip size.
13910          */
13911         cursize = 0x10;
13912
13913         while (cursize < tp->nvram_size) {
13914                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13915                         return;
13916
13917                 if (val == magic)
13918                         break;
13919
13920                 cursize <<= 1;
13921         }
13922
13923         tp->nvram_size = cursize;
13924 }
13925
13926 static void tg3_get_nvram_size(struct tg3 *tp)
13927 {
13928         u32 val;
13929
13930         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13931                 return;
13932
13933         /* Selfboot format */
13934         if (val != TG3_EEPROM_MAGIC) {
13935                 tg3_get_eeprom_size(tp);
13936                 return;
13937         }
13938
13939         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13940                 if (val != 0) {
13941                         /* This is confusing.  We want to operate on the
13942                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13943                          * call will read from NVRAM and byteswap the data
13944                          * according to the byteswapping settings for all
13945                          * other register accesses.  This ensures the data we
13946                          * want will always reside in the lower 16-bits.
13947                          * However, the data in NVRAM is in LE format, which
13948                          * means the data from the NVRAM read will always be
13949                          * opposite the endianness of the CPU.  The 16-bit
13950                          * byteswap then brings the data to CPU endianness.
13951                          */
13952                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13953                         return;
13954                 }
13955         }
13956         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13957 }
13958
/* Decode the NVRAM_CFG1 strapping into vendor/page-size settings.
 * Only the 5750 and 5780-class chips carry the vendor field; all other
 * chips default to buffered Atmel AT45DB0X1B flash.
 */
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface: clear the compatibility bypass bit
		 * so the device is accessed through the NVRAM registers.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
14009
14010 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14011 {
14012         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14013         case FLASH_5752PAGE_SIZE_256:
14014                 tp->nvram_pagesize = 256;
14015                 break;
14016         case FLASH_5752PAGE_SIZE_512:
14017                 tp->nvram_pagesize = 512;
14018                 break;
14019         case FLASH_5752PAGE_SIZE_1K:
14020                 tp->nvram_pagesize = 1024;
14021                 break;
14022         case FLASH_5752PAGE_SIZE_2K:
14023                 tp->nvram_pagesize = 2048;
14024                 break;
14025         case FLASH_5752PAGE_SIZE_4K:
14026                 tp->nvram_pagesize = 4096;
14027                 break;
14028         case FLASH_5752PAGE_SIZE_264:
14029                 tp->nvram_pagesize = 264;
14030                 break;
14031         case FLASH_5752PAGE_SIZE_528:
14032                 tp->nvram_pagesize = 528;
14033                 break;
14034         }
14035 }
14036
/* Decode the 5752 NVRAM_CFG1 strapping into vendor/page-size settings. */
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
14077
/* Decode the 5755 NVRAM_CFG1 strapping.  When the TPM protection bit
 * is set, only part of the device is accessible, so a reduced usable
 * size is reported.
 */
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
14133
/* Decode the 5787 NVRAM_CFG1 strapping into vendor/page-size settings. */
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM device: access through the NVRAM registers. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
14171
/* Decode the 5761 NVRAM_CFG1 strapping.  When the TPM protection bit
 * is set, the usable size comes from the hardware lockout register;
 * otherwise it is inferred from the exact flash part.
 */
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
14246
14247 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14248 {
14249         tp->nvram_jedecnum = JEDEC_ATMEL;
14250         tg3_flag_set(tp, NVRAM_BUFFERED);
14251         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14252 }
14253
/* Decode the NVRAM strapping for 57780 (and 57765-class) devices.
 *
 * Reads NVRAM_CFG1 to identify the attached EEPROM/flash part and fills
 * in tp->nvram_jedecnum, the NVRAM-related flags and, where the strap
 * encodes it, tp->nvram_size.  An unrecognized strap marks the device
 * as having no NVRAM.
 */
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		/* Plain EEPROM: disable the flash compatibility bypass and
		 * return early, skipping the flash-only page-size probe
		 * at the bottom of this function.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		/* Atmel AT45DB-family flash: the inner switch maps the
		 * specific part to its capacity.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		/* ST M45PE-family flash. */
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* 264/528-byte pages indicate a DataFlash-style part that uses
	 * page/offset addressing; everything else is linearly addressed.
	 */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14325
14326
/* Decode the NVRAM strapping for 5717/5719 devices.
 *
 * Same pattern as tg3_get_57780_nvram_info(): read NVRAM_CFG1, record
 * the vendor/flags, and set tp->nvram_size where the strap pins it
 * down.  Straps that can back multiple capacities leave nvram_size at
 * 0 so the caller falls back to tg3_nvram_get_size().
 */
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		/* Plain EEPROM: clear the flash compatibility bypass and
		 * skip the flash page-size probing below.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		/* Atmel flash parts. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		/* ST flash parts. */
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Non-264/528-byte pages mean linear addressing (no DataFlash
	 * page/offset translation).
	 */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14404
/* Decode the NVRAM strapping for 5720 and 5762 devices.
 *
 * On 5762 the pinstrap encoding differs from 5720, so the first stage
 * normalizes 5762-specific strap values onto their 5720 equivalents
 * before the common decode.  After decoding, 5762 parts additionally
 * have their first NVRAM word validated against the expected magic;
 * a mismatch disables NVRAM support entirely.
 */
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		/* No vendor bits strapped at all means no NVRAM fitted. */
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		/* Map 5762-specific strap values to 5720 equivalents. */
		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		/* Plain EEPROM: clear the flash compatibility bypass,
		 * select the page size by density and return early,
		 * skipping the flash-only probing below.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		/* Atmel flash parts; inner switch maps part to capacity. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			/* On 5762 leave nvram_size at 0 so the caller
			 * detects the size from the device instead.
			 */
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		/* ST flash parts; inner switch maps part to capacity. */
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			/* See Atmel default case above: 5762 sizes itself. */
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Non-264/528-byte pages imply linear (non-DataFlash) addressing. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		/* If word 0 cannot be read, leave the current settings. */
		if (tg3_nvram_read(tp, 0, &val))
			return;

		/* Reject NVRAM contents that carry neither the standard
		 * magic nor the firmware magic signature.
		 */
		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
14551
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* One-time NVRAM/EEPROM bring-up: resets the EEPROM state machine,
 * enables serial-EEPROM access, then dispatches to the per-ASIC
 * strap-decode routine to fill in tp->nvram_* and the NVRAM flags.
 * On 5700/5701 (or SSB cores, which have no NVRAM at all) it falls
 * back to plain EEPROM probing.
 */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	/* Reset the EEPROM access state machine and program the default
	 * clock period, then give the hardware time to settle.
	 */
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		/* All strap decoding below requires exclusive NVRAM
		 * access; bail out (leaving NVRAM flagged but unsized)
		 * if the lock cannot be obtained.
		 */
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		/* Dispatch to the ASIC-specific strap decoder. */
		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		/* Decoders that could not pin the size leave it 0;
		 * detect it from the device in that case.
		 */
		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		/* 5700/5701: direct EEPROM access only. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
14626
/* One entry mapping a PCI (subsystem vendor, subsystem device) pair to
 * the PHY ID expected on that board.  Some entries use a phy_id of 0 —
 * presumably boards without a recognized copper PHY (several are fiber
 * models); confirm against the board list before relying on it.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
14631
/* Board-specific PHY ID overrides, searched linearly by
 * tg3_lookup_by_subsys() using the device's PCI subsystem IDs.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
14695
14696 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14697 {
14698         int i;
14699
14700         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14701                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14702                      tp->pdev->subsystem_vendor) &&
14703                     (subsys_id_to_phy_id[i].subsys_devid ==
14704                      tp->pdev->subsystem_device))
14705                         return &subsys_id_to_phy_id[i];
14706         }
14707         return NULL;
14708 }
14709
/* Pull the hardware configuration left in NIC SRAM by the bootcode
 * (PHY ID, LED mode, WOL/ASF/APE enables, serdes and RGMII options)
 * and translate it into tp->phy_id, tp->led_ctrl, tp->phy_flags and
 * the corresponding tg3 flags.  Finally sync the derived WOL
 * capability into the PM core's wakeup state.
 */
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906 exposes its config through the VCPU shadow
		 * register rather than NIC SRAM.
		 */
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	/* The SRAM config block is only valid when the bootcode left
	 * its signature; otherwise keep the defaults set above.
	 */
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 only exists on newer ASICs with a sane bootcode
		 * version; cfg2 stays 0 otherwise.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY ID from the two SRAM fields into
		 * the driver's packed phy_id format.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		/* 5750+ parts carry the LED mode (including the Shasta
		 * extended modes) in CFG_2; older parts use CFG.
		 */
		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			/* Early 5750 revisions need the PHY LED bits
			 * kept on alongside the shared mode.
			 */
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Board-specific LED quirks override the strap. */
		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Specific Arima boards are exempt from the
			 * write-protect strap.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* Serdes boards lose WOL capability unless the fiber
		 * WOL strap is set.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	/* Publish the final WOL state to the PM core. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
14911
/* Read one 32-bit word from the APE's OTP region.
 *
 * @offset: word index into the OTP area; scaled by 8 to form the APE
 *          OTP address (NOTE(review): the *8 scaling is assumed to
 *          match the APE OTP addressing — confirm against the APE
 *          register spec).
 * @val:    output; written only when the read completes.
 *
 * Holds the NVRAM lock across the APE access.  Returns 0 on success,
 * -EBUSY if the APE never signals completion (~1 ms poll), or the
 * error from tg3_nvram_lock().
 */
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	/* Read back the control register — presumably to flush the
	 * posted write before the delay; confirm.
	 */
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(10);

	/* Poll for completion: up to 100 iterations x 10 us. */
	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	/* Disable the OTP engine again before releasing the lock. */
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
14944
14945 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14946 {
14947         int i;
14948         u32 val;
14949
14950         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14951         tw32(OTP_CTRL, cmd);
14952
14953         /* Wait for up to 1 ms for command to execute. */
14954         for (i = 0; i < 100; i++) {
14955                 val = tr32(OTP_STATUS);
14956                 if (val & OTP_STATUS_CMD_DONE)
14957                         break;
14958                 udelay(10);
14959         }
14960
14961         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14962 }
14963
14964 /* Read the gphy configuration from the OTP region of the chip.  The gphy
14965  * configuration is a 32-bit value that straddles the alignment boundary.
14966  * We do two 32-bit reads and then shift and merge the results.
14967  */
14968 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14969 {
14970         u32 bhalf_otp, thalf_otp;
14971
14972         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14973
14974         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14975                 return 0;
14976
14977         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14978
14979         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14980                 return 0;
14981
14982         thalf_otp = tr32(OTP_READ_DATA);
14983
14984         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14985
14986         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14987                 return 0;
14988
14989         bhalf_otp = tr32(OTP_READ_DATA);
14990
14991         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14992 }
14993
14994 static void tg3_phy_init_link_config(struct tg3 *tp)
14995 {
14996         u32 adv = ADVERTISED_Autoneg;
14997
14998         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14999                 adv |= ADVERTISED_1000baseT_Half |
15000                        ADVERTISED_1000baseT_Full;
15001
15002         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15003                 adv |= ADVERTISED_100baseT_Half |
15004                        ADVERTISED_100baseT_Full |
15005                        ADVERTISED_10baseT_Half |
15006                        ADVERTISED_10baseT_Full |
15007                        ADVERTISED_TP;
15008         else
15009                 adv |= ADVERTISED_FIBRE;
15010
15011         tp->link_config.advertising = adv;
15012         tp->link_config.speed = SPEED_UNKNOWN;
15013         tp->link_config.duplex = DUPLEX_UNKNOWN;
15014         tp->link_config.autoneg = AUTONEG_ENABLE;
15015         tp->link_config.active_speed = SPEED_UNKNOWN;
15016         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15017
15018         tp->old_link = -1;
15019 }
15020
/* Detect and set up the PHY attached to this device.
 *
 * Establishes the default flow-control/autoneg policy, selects the APE
 * PHY lock for this PCI function, determines the PHY ID (from hardware
 * registers, the hard-coded subsystem table, or a value left by
 * tg3_get_eeprom_hw_cfg()), enables EEE defaults on capable ASICs and,
 * when it is safe to touch the PHY, resets and autonegotiates it.
 *
 * Returns 0 on success or a negative errno (-ENODEV if no usable PHY
 * could be identified).
 */
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Each PCI function arbitrates PHY access through its own
		 * APE lock.
		 */
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	/* Without ASF, a copper full-speed PHY has no reason to keep power
	 * or link alive across power-downs.
	 */
	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two MII ID registers into tg3's internal
		 * PHY-ID layout.
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			/* A zero/8002 ID at this point implies a SerDes
			 * interface.
			 */
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* EEE is supported on these copper ASIC revs (minus early A0
	 * steppings of 5717/57765).
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	/* Only reset/renegotiate when firmware (ASF/APE) is not also
	 * driving the PHY and the link is not required to stay up.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* Read BMSR twice: the first read clears latched bits. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): the DSP init is deliberately issued a
		 * second time here - presumably a workaround for flaky
		 * 5401 parts; confirm before "simplifying".
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
15175
/* Extract the board part number (and, on Dell-branded boards, the
 * firmware version string) from the device's PCI VPD area.  When no
 * usable VPD is found, fall back to a hardcoded name derived from the
 * PCI device ID.  Results land in tp->board_part_number / tp->fw_ver.
 */
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	/* vpd_data is heap-allocated by tg3_vpd_readblock(); freed below. */
	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	/* Locate the read-only VPD resource block. */
	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	/* Reject a RO block that claims to extend past the buffer. */
	if (block_end > vpdlen)
		goto out_not_found;

	/* Dell OEM boards (manufacturer ID "1028") carry the firmware
	 * version in the V0 vendor-specific keyword.
	 */
	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		/* Clamp so snprintf's precision cannot overrun fw_ver. */
		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	/* Standard PN keyword holds the board part number. */
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	/* No VPD part number: synthesize one from the PCI device ID. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
15299
15300 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15301 {
15302         u32 val;
15303
15304         if (tg3_nvram_read(tp, offset, &val) ||
15305             (val & 0xfc000000) != 0x0c000000 ||
15306             tg3_nvram_read(tp, offset + 4, &val) ||
15307             val != 0)
15308                 return 0;
15309
15310         return 1;
15311 }
15312
/* Read the bootcode version out of NVRAM and append it to tp->fw_ver.
 *
 * Newer bootcode images (signature 0x0c000000 with a zero second word)
 * store a 16-byte ASCII version string addressed via a pointer at
 * image offset 8; older images encode major/minor in the fixed
 * TG3_NVM_PTREV_BCVER word, formatted here as "vM.mm".
 */
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* Word 0xc points at the bootcode image; word 0x4 is its load
	 * base, needed to turn image-relative pointers into NVRAM offsets.
	 */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* New-style image header: signature word + zero word. */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Append after whatever (e.g. VPD version) is already in fw_ver. */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Needs room for the 16-byte version string. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		/* Convert the image-relative pointer to an NVRAM offset. */
		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
15364
15365 static void tg3_read_hwsb_ver(struct tg3 *tp)
15366 {
15367         u32 val, major, minor;
15368
15369         /* Use native endian representation */
15370         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15371                 return;
15372
15373         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15374                 TG3_NVM_HWSB_CFG1_MAJSFT;
15375         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15376                 TG3_NVM_HWSB_CFG1_MINSFT;
15377
15378         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15379 }
15380
/* Decode a self-boot (format 1) firmware version from NVRAM and append
 * it to tp->fw_ver as "sb vM.mm" plus an optional build letter
 * ('a' for build 1, 'b' for build 2, ...).  @val is the NVRAM magic
 * word already read by the caller, which encodes format and revision.
 */
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* Each format-1 revision keeps its edh (version) word at a
	 * different NVRAM offset.
	 */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		/* Unknown revision: leave just the "sb" tag. */
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity limits: two-digit minor, build letter within 'a'..'z'. */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
15435
/* Locate the ASF management firmware image via the NVRAM directory and
 * append its version string (up to 16 bytes) to tp->fw_ver, separated
 * by ", ".
 */
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	/* Scan directory entries for the ASF-init type. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Load base: fixed on pre-5705 parts, stored just before the
	 * directory entry otherwise.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	/* Follow the entry to the image, validate it, then read the
	 * image-relative pointer to the version string.
	 */
	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	/* NOTE(review): the ", " separator is written without an explicit
	 * bounds check; presumably fw_ver is always short enough at this
	 * point (callers only reach here with a bc/sb tag) - confirm.
	 */
	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Partial copy when fewer than 4 bytes of room remain. */
		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
15487
15488 static void tg3_probe_ncsi(struct tg3 *tp)
15489 {
15490         u32 apedata;
15491
15492         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15493         if (apedata != APE_SEG_SIG_MAGIC)
15494                 return;
15495
15496         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15497         if (!(apedata & APE_FW_STATUS_READY))
15498                 return;
15499
15500         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15501                 tg3_flag_set(tp, APE_HAS_NCSI);
15502 }
15503
15504 static void tg3_read_dash_ver(struct tg3 *tp)
15505 {
15506         int vlen;
15507         u32 apedata;
15508         char *fwtype;
15509
15510         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15511
15512         if (tg3_flag(tp, APE_HAS_NCSI))
15513                 fwtype = "NCSI";
15514         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15515                 fwtype = "SMASH";
15516         else
15517                 fwtype = "DASH";
15518
15519         vlen = strlen(tp->fw_ver);
15520
15521         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15522                  fwtype,
15523                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15524                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15525                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15526                  (apedata & APE_FW_VERSION_BLDMSK));
15527 }
15528
15529 static void tg3_read_otp_ver(struct tg3 *tp)
15530 {
15531         u32 val, val2;
15532
15533         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15534                 return;
15535
15536         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15537             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15538             TG3_OTP_MAGIC0_VALID(val)) {
15539                 u64 val64 = (u64) val << 32 | val2;
15540                 u32 ver = 0;
15541                 int i, vlen;
15542
15543                 for (i = 0; i < 7; i++) {
15544                         if ((val64 & 0xff) == 0)
15545                                 break;
15546                         ver = val64 & 0xff;
15547                         val64 >>= 8;
15548                 }
15549                 vlen = strlen(tp->fw_ver);
15550                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15551         }
15552 }
15553
15554 static void tg3_read_fw_ver(struct tg3 *tp)
15555 {
15556         u32 val;
15557         bool vpd_vers = false;
15558
15559         if (tp->fw_ver[0] != 0)
15560                 vpd_vers = true;
15561
15562         if (tg3_flag(tp, NO_NVRAM)) {
15563                 strcat(tp->fw_ver, "sb");
15564                 tg3_read_otp_ver(tp);
15565                 return;
15566         }
15567
15568         if (tg3_nvram_read(tp, 0, &val))
15569                 return;
15570
15571         if (val == TG3_EEPROM_MAGIC)
15572                 tg3_read_bc_ver(tp);
15573         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15574                 tg3_read_sb_ver(tp, val);
15575         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15576                 tg3_read_hwsb_ver(tp);
15577
15578         if (tg3_flag(tp, ENABLE_ASF)) {
15579                 if (tg3_flag(tp, ENABLE_APE)) {
15580                         tg3_probe_ncsi(tp);
15581                         if (!vpd_vers)
15582                                 tg3_read_dash_ver(tp);
15583                 } else if (!vpd_vers) {
15584                         tg3_read_mgmtfw_ver(tp);
15585                 }
15586         }
15587
15588         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15589 }
15590
15591 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15592 {
15593         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15594                 return TG3_RX_RET_MAX_SIZE_5717;
15595         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15596                 return TG3_RX_RET_MAX_SIZE_5700;
15597         else
15598                 return TG3_RX_RET_MAX_SIZE_5705;
15599 }
15600
15601 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15602         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15603         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15604         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15605         { },
15606 };
15607
/* Find the sibling PCI function of a dual-port device (e.g. 5704).
 *
 * Scans all eight functions of this device's slot for a pci_dev other
 * than tp->pdev.  Returns tp->pdev itself when the part is configured
 * single-port.  The returned pointer is NOT reference-counted (see the
 * comment below).
 */
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	/* devnr: slot base devfn with the function bits cleared. */
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* Drop the ref pci_get_slot took (free(NULL)-style no-op
		 * when the function is absent).
		 */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 *
	 * NOTE(review): if the loop finishes without a break, peer holds
	 * the (already put) result of the last pci_get_slot; this appears
	 * to rely on that final probe returning NULL or tp->pdev - confirm
	 * before reworking.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
15635
/* Determine the chip revision ID and derive the family flags
 * (5705_PLUS, 5750_PLUS, 5755_PLUS, 5780_CLASS, 5717_PLUS,
 * 57765_CLASS, 57765_PLUS) that the rest of the driver keys off.
 * @misc_ctrl_reg is the raw TG3PCI_MISC_HOST_CTRL value.
 *
 * Note the ordering: later flag tests read flags set earlier in this
 * function.
 */
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		/* Pick the product-ID config register that holds the real
		 * ASIC rev for this device generation.
		 */
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* 5717 C0 reports itself as such but behaves like 5720 A0. */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	     tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
15720
15721 static bool tg3_10_100_only_device(struct tg3 *tp,
15722                                    const struct pci_device_id *ent)
15723 {
15724         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15725
15726         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15727              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15728             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15729                 return true;
15730
15731         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15732                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15733                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15734                                 return true;
15735                 } else {
15736                         return true;
15737                 }
15738         }
15739
15740         return false;
15741 }
15742
15743 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15744 {
15745         u32 misc_ctrl_reg;
15746         u32 pci_state_reg, grc_misc_cfg;
15747         u32 val;
15748         u16 pci_cmd;
15749         int err;
15750
15751         /* Force memory write invalidate off.  If we leave it on,
15752          * then on 5700_BX chips we have to enable a workaround.
15753          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15754          * to match the cacheline size.  The Broadcom driver have this
15755          * workaround but turns MWI off all the times so never uses
15756          * it.  This seems to suggest that the workaround is insufficient.
15757          */
15758         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15759         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15760         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15761
15762         /* Important! -- Make sure register accesses are byteswapped
15763          * correctly.  Also, for those chips that require it, make
15764          * sure that indirect register accesses are enabled before
15765          * the first operation.
15766          */
15767         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15768                               &misc_ctrl_reg);
15769         tp->misc_host_ctrl |= (misc_ctrl_reg &
15770                                MISC_HOST_CTRL_CHIPREV);
15771         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15772                                tp->misc_host_ctrl);
15773
15774         tg3_detect_asic_rev(tp, misc_ctrl_reg);
15775
15776         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15777          * we need to disable memory and use config. cycles
15778          * only to access all registers. The 5702/03 chips
15779          * can mistakenly decode the special cycles from the
15780          * ICH chipsets as memory write cycles, causing corruption
15781          * of register and memory space. Only certain ICH bridges
15782          * will drive special cycles with non-zero data during the
15783          * address phase which can fall within the 5703's address
15784          * range. This is not an ICH bug as the PCI spec allows
15785          * non-zero address during special cycles. However, only
15786          * these ICH bridges are known to drive non-zero addresses
15787          * during special cycles.
15788          *
15789          * Since special cycles do not cross PCI bridges, we only
15790          * enable this workaround if the 5703 is on the secondary
15791          * bus of these ICH bridges.
15792          */
15793         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15794             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15795                 static struct tg3_dev_id {
15796                         u32     vendor;
15797                         u32     device;
15798                         u32     rev;
15799                 } ich_chipsets[] = {
15800                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15801                           PCI_ANY_ID },
15802                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15803                           PCI_ANY_ID },
15804                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15805                           0xa },
15806                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15807                           PCI_ANY_ID },
15808                         { },
15809                 };
15810                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15811                 struct pci_dev *bridge = NULL;
15812
15813                 while (pci_id->vendor != 0) {
15814                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
15815                                                 bridge);
15816                         if (!bridge) {
15817                                 pci_id++;
15818                                 continue;
15819                         }
15820                         if (pci_id->rev != PCI_ANY_ID) {
15821                                 if (bridge->revision > pci_id->rev)
15822                                         continue;
15823                         }
15824                         if (bridge->subordinate &&
15825                             (bridge->subordinate->number ==
15826                              tp->pdev->bus->number)) {
15827                                 tg3_flag_set(tp, ICH_WORKAROUND);
15828                                 pci_dev_put(bridge);
15829                                 break;
15830                         }
15831                 }
15832         }
15833
15834         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15835                 static struct tg3_dev_id {
15836                         u32     vendor;
15837                         u32     device;
15838                 } bridge_chipsets[] = {
15839                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15840                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15841                         { },
15842                 };
15843                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15844                 struct pci_dev *bridge = NULL;
15845
15846                 while (pci_id->vendor != 0) {
15847                         bridge = pci_get_device(pci_id->vendor,
15848                                                 pci_id->device,
15849                                                 bridge);
15850                         if (!bridge) {
15851                                 pci_id++;
15852                                 continue;
15853                         }
15854                         if (bridge->subordinate &&
15855                             (bridge->subordinate->number <=
15856                              tp->pdev->bus->number) &&
15857                             (bridge->subordinate->busn_res.end >=
15858                              tp->pdev->bus->number)) {
15859                                 tg3_flag_set(tp, 5701_DMA_BUG);
15860                                 pci_dev_put(bridge);
15861                                 break;
15862                         }
15863                 }
15864         }
15865
15866         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15867          * DMA addresses > 40-bit. This bridge may have other additional
15868          * 57xx devices behind it in some 4-port NIC designs for example.
15869          * Any tg3 device found behind the bridge will also need the 40-bit
15870          * DMA workaround.
15871          */
15872         if (tg3_flag(tp, 5780_CLASS)) {
15873                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15874                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15875         } else {
15876                 struct pci_dev *bridge = NULL;
15877
15878                 do {
15879                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15880                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15881                                                 bridge);
15882                         if (bridge && bridge->subordinate &&
15883                             (bridge->subordinate->number <=
15884                              tp->pdev->bus->number) &&
15885                             (bridge->subordinate->busn_res.end >=
15886                              tp->pdev->bus->number)) {
15887                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15888                                 pci_dev_put(bridge);
15889                                 break;
15890                         }
15891                 } while (bridge);
15892         }
15893
15894         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15895             tg3_asic_rev(tp) == ASIC_REV_5714)
15896                 tp->pdev_peer = tg3_find_peer(tp);
15897
15898         /* Determine TSO capabilities */
15899         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15900                 ; /* Do nothing. HW bug. */
15901         else if (tg3_flag(tp, 57765_PLUS))
15902                 tg3_flag_set(tp, HW_TSO_3);
15903         else if (tg3_flag(tp, 5755_PLUS) ||
15904                  tg3_asic_rev(tp) == ASIC_REV_5906)
15905                 tg3_flag_set(tp, HW_TSO_2);
15906         else if (tg3_flag(tp, 5750_PLUS)) {
15907                 tg3_flag_set(tp, HW_TSO_1);
15908                 tg3_flag_set(tp, TSO_BUG);
15909                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15910                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15911                         tg3_flag_clear(tp, TSO_BUG);
15912         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15913                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15914                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15915                 tg3_flag_set(tp, FW_TSO);
15916                 tg3_flag_set(tp, TSO_BUG);
15917                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15918                         tp->fw_needed = FIRMWARE_TG3TSO5;
15919                 else
15920                         tp->fw_needed = FIRMWARE_TG3TSO;
15921         }
15922
15923         /* Selectively allow TSO based on operating conditions */
15924         if (tg3_flag(tp, HW_TSO_1) ||
15925             tg3_flag(tp, HW_TSO_2) ||
15926             tg3_flag(tp, HW_TSO_3) ||
15927             tg3_flag(tp, FW_TSO)) {
15928                 /* For firmware TSO, assume ASF is disabled.
15929                  * We'll disable TSO later if we discover ASF
15930                  * is enabled in tg3_get_eeprom_hw_cfg().
15931                  */
15932                 tg3_flag_set(tp, TSO_CAPABLE);
15933         } else {
15934                 tg3_flag_clear(tp, TSO_CAPABLE);
15935                 tg3_flag_clear(tp, TSO_BUG);
15936                 tp->fw_needed = NULL;
15937         }
15938
15939         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15940                 tp->fw_needed = FIRMWARE_TG3;
15941
15942         if (tg3_asic_rev(tp) == ASIC_REV_57766)
15943                 tp->fw_needed = FIRMWARE_TG357766;
15944
15945         tp->irq_max = 1;
15946
15947         if (tg3_flag(tp, 5750_PLUS)) {
15948                 tg3_flag_set(tp, SUPPORT_MSI);
15949                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15950                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15951                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15952                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15953                      tp->pdev_peer == tp->pdev))
15954                         tg3_flag_clear(tp, SUPPORT_MSI);
15955
15956                 if (tg3_flag(tp, 5755_PLUS) ||
15957                     tg3_asic_rev(tp) == ASIC_REV_5906) {
15958                         tg3_flag_set(tp, 1SHOT_MSI);
15959                 }
15960
15961                 if (tg3_flag(tp, 57765_PLUS)) {
15962                         tg3_flag_set(tp, SUPPORT_MSIX);
15963                         tp->irq_max = TG3_IRQ_MAX_VECS;
15964                 }
15965         }
15966
15967         tp->txq_max = 1;
15968         tp->rxq_max = 1;
15969         if (tp->irq_max > 1) {
15970                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15971                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15972
15973                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15974                     tg3_asic_rev(tp) == ASIC_REV_5720)
15975                         tp->txq_max = tp->irq_max - 1;
15976         }
15977
15978         if (tg3_flag(tp, 5755_PLUS) ||
15979             tg3_asic_rev(tp) == ASIC_REV_5906)
15980                 tg3_flag_set(tp, SHORT_DMA_BUG);
15981
15982         if (tg3_asic_rev(tp) == ASIC_REV_5719)
15983                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15984
15985         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15986             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15987             tg3_asic_rev(tp) == ASIC_REV_5720 ||
15988             tg3_asic_rev(tp) == ASIC_REV_5762)
15989                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15990
15991         if (tg3_flag(tp, 57765_PLUS) &&
15992             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15993                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15994
15995         if (!tg3_flag(tp, 5705_PLUS) ||
15996             tg3_flag(tp, 5780_CLASS) ||
15997             tg3_flag(tp, USE_JUMBO_BDFLAG))
15998                 tg3_flag_set(tp, JUMBO_CAPABLE);
15999
16000         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16001                               &pci_state_reg);
16002
16003         if (pci_is_pcie(tp->pdev)) {
16004                 u16 lnkctl;
16005
16006                 tg3_flag_set(tp, PCI_EXPRESS);
16007
16008                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16009                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16010                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16011                                 tg3_flag_clear(tp, HW_TSO_2);
16012                                 tg3_flag_clear(tp, TSO_CAPABLE);
16013                         }
16014                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16015                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16016                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16017                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16018                                 tg3_flag_set(tp, CLKREQ_BUG);
16019                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16020                         tg3_flag_set(tp, L1PLLPD_EN);
16021                 }
16022         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16023                 /* BCM5785 devices are effectively PCIe devices, and should
16024                  * follow PCIe codepaths, but do not have a PCIe capabilities
16025                  * section.
16026                  */
16027                 tg3_flag_set(tp, PCI_EXPRESS);
16028         } else if (!tg3_flag(tp, 5705_PLUS) ||
16029                    tg3_flag(tp, 5780_CLASS)) {
16030                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16031                 if (!tp->pcix_cap) {
16032                         dev_err(&tp->pdev->dev,
16033                                 "Cannot find PCI-X capability, aborting\n");
16034                         return -EIO;
16035                 }
16036
16037                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16038                         tg3_flag_set(tp, PCIX_MODE);
16039         }
16040
16041         /* If we have an AMD 762 or VIA K8T800 chipset, write
16042          * reordering to the mailbox registers done by the host
16043          * controller can cause major troubles.  We read back from
16044          * every mailbox register write to force the writes to be
16045          * posted to the chip in order.
16046          */
16047         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16048             !tg3_flag(tp, PCI_EXPRESS))
16049                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16050
16051         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16052                              &tp->pci_cacheline_sz);
16053         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16054                              &tp->pci_lat_timer);
16055         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16056             tp->pci_lat_timer < 64) {
16057                 tp->pci_lat_timer = 64;
16058                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16059                                       tp->pci_lat_timer);
16060         }
16061
16062         /* Important! -- It is critical that the PCI-X hw workaround
16063          * situation is decided before the first MMIO register access.
16064          */
16065         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16066                 /* 5700 BX chips need to have their TX producer index
16067                  * mailboxes written twice to workaround a bug.
16068                  */
16069                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16070
16071                 /* If we are in PCI-X mode, enable register write workaround.
16072                  *
16073                  * The workaround is to use indirect register accesses
16074                  * for all chip writes not to mailbox registers.
16075                  */
16076                 if (tg3_flag(tp, PCIX_MODE)) {
16077                         u32 pm_reg;
16078
16079                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16080
16081                         /* The chip can have it's power management PCI config
16082                          * space registers clobbered due to this bug.
16083                          * So explicitly force the chip into D0 here.
16084                          */
16085                         pci_read_config_dword(tp->pdev,
16086                                               tp->pm_cap + PCI_PM_CTRL,
16087                                               &pm_reg);
16088                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16089                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16090                         pci_write_config_dword(tp->pdev,
16091                                                tp->pm_cap + PCI_PM_CTRL,
16092                                                pm_reg);
16093
16094                         /* Also, force SERR#/PERR# in PCI command. */
16095                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16096                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16097                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16098                 }
16099         }
16100
16101         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16102                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16103         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16104                 tg3_flag_set(tp, PCI_32BIT);
16105
16106         /* Chip-specific fixup from Broadcom driver */
16107         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16108             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16109                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16110                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16111         }
16112
16113         /* Default fast path register access methods */
16114         tp->read32 = tg3_read32;
16115         tp->write32 = tg3_write32;
16116         tp->read32_mbox = tg3_read32;
16117         tp->write32_mbox = tg3_write32;
16118         tp->write32_tx_mbox = tg3_write32;
16119         tp->write32_rx_mbox = tg3_write32;
16120
16121         /* Various workaround register access methods */
16122         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16123                 tp->write32 = tg3_write_indirect_reg32;
16124         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16125                  (tg3_flag(tp, PCI_EXPRESS) &&
16126                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16127                 /*
16128                  * Back to back register writes can cause problems on these
16129                  * chips, the workaround is to read back all reg writes
16130                  * except those to mailbox regs.
16131                  *
16132                  * See tg3_write_indirect_reg32().
16133                  */
16134                 tp->write32 = tg3_write_flush_reg32;
16135         }
16136
16137         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16138                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16139                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16140                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16141         }
16142
16143         if (tg3_flag(tp, ICH_WORKAROUND)) {
16144                 tp->read32 = tg3_read_indirect_reg32;
16145                 tp->write32 = tg3_write_indirect_reg32;
16146                 tp->read32_mbox = tg3_read_indirect_mbox;
16147                 tp->write32_mbox = tg3_write_indirect_mbox;
16148                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16149                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16150
16151                 iounmap(tp->regs);
16152                 tp->regs = NULL;
16153
16154                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16155                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16156                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16157         }
16158         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16159                 tp->read32_mbox = tg3_read32_mbox_5906;
16160                 tp->write32_mbox = tg3_write32_mbox_5906;
16161                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16162                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16163         }
16164
16165         if (tp->write32 == tg3_write_indirect_reg32 ||
16166             (tg3_flag(tp, PCIX_MODE) &&
16167              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16168               tg3_asic_rev(tp) == ASIC_REV_5701)))
16169                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16170
16171         /* The memory arbiter has to be enabled in order for SRAM accesses
16172          * to succeed.  Normally on powerup the tg3 chip firmware will make
16173          * sure it is enabled, but other entities such as system netboot
16174          * code might disable it.
16175          */
16176         val = tr32(MEMARB_MODE);
16177         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16178
16179         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16180         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16181             tg3_flag(tp, 5780_CLASS)) {
16182                 if (tg3_flag(tp, PCIX_MODE)) {
16183                         pci_read_config_dword(tp->pdev,
16184                                               tp->pcix_cap + PCI_X_STATUS,
16185                                               &val);
16186                         tp->pci_fn = val & 0x7;
16187                 }
16188         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16189                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16190                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16191                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16192                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16193                         val = tr32(TG3_CPMU_STATUS);
16194
16195                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16196                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16197                 else
16198                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16199                                      TG3_CPMU_STATUS_FSHFT_5719;
16200         }
16201
16202         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16203                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16204                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16205         }
16206
16207         /* Get eeprom hw config before calling tg3_set_power_state().
16208          * In particular, the TG3_FLAG_IS_NIC flag must be
16209          * determined before calling tg3_set_power_state() so that
16210          * we know whether or not to switch out of Vaux power.
16211          * When the flag is set, it means that GPIO1 is used for eeprom
16212          * write protect and also implies that it is a LOM where GPIOs
16213          * are not used to switch power.
16214          */
16215         tg3_get_eeprom_hw_cfg(tp);
16216
16217         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16218                 tg3_flag_clear(tp, TSO_CAPABLE);
16219                 tg3_flag_clear(tp, TSO_BUG);
16220                 tp->fw_needed = NULL;
16221         }
16222
16223         if (tg3_flag(tp, ENABLE_APE)) {
16224                 /* Allow reads and writes to the
16225                  * APE register and memory space.
16226                  */
16227                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16228                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16229                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16230                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16231                                        pci_state_reg);
16232
16233                 tg3_ape_lock_init(tp);
16234         }
16235
16236         /* Set up tp->grc_local_ctrl before calling
16237          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16238          * will bring 5700's external PHY out of reset.
16239          * It is also used as eeprom write protect on LOMs.
16240          */
16241         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16242         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16243             tg3_flag(tp, EEPROM_WRITE_PROT))
16244                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16245                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16246         /* Unused GPIO3 must be driven as output on 5752 because there
16247          * are no pull-up resistors on unused GPIO pins.
16248          */
16249         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16250                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16251
16252         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16253             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16254             tg3_flag(tp, 57765_CLASS))
16255                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16256
16257         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16258             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16259                 /* Turn off the debug UART. */
16260                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16261                 if (tg3_flag(tp, IS_NIC))
16262                         /* Keep VMain power. */
16263                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16264                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16265         }
16266
16267         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16268                 tp->grc_local_ctrl |=
16269                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16270
16271         /* Switch out of Vaux if it is a NIC */
16272         tg3_pwrsrc_switch_to_vmain(tp);
16273
16274         /* Derive initial jumbo mode from MTU assigned in
16275          * ether_setup() via the alloc_etherdev() call
16276          */
16277         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16278                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16279
16280         /* Determine WakeOnLan speed to use. */
16281         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16282             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16283             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16284             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16285                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16286         } else {
16287                 tg3_flag_set(tp, WOL_SPEED_100MB);
16288         }
16289
16290         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16291                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16292
16293         /* A few boards don't want Ethernet@WireSpeed phy feature */
16294         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16295             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16296              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16297              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16298             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16299             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16300                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16301
16302         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16303             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16304                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16305         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16306                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16307
16308         if (tg3_flag(tp, 5705_PLUS) &&
16309             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16310             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16311             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16312             !tg3_flag(tp, 57765_PLUS)) {
16313                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16314                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16315                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16316                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16317                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16318                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16319                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16320                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16321                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16322                 } else
16323                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16324         }
16325
16326         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16327             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16328                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16329                 if (tp->phy_otp == 0)
16330                         tp->phy_otp = TG3_OTP_DEFAULT;
16331         }
16332
16333         if (tg3_flag(tp, CPMU_PRESENT))
16334                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16335         else
16336                 tp->mi_mode = MAC_MI_MODE_BASE;
16337
16338         tp->coalesce_mode = 0;
16339         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16340             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16341                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16342
16343         /* Set these bits to enable statistics workaround. */
16344         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16345             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16346             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16347                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16348                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16349         }
16350
16351         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16352             tg3_asic_rev(tp) == ASIC_REV_57780)
16353                 tg3_flag_set(tp, USE_PHYLIB);
16354
16355         err = tg3_mdio_init(tp);
16356         if (err)
16357                 return err;
16358
16359         /* Initialize data/descriptor byte/word swapping. */
16360         val = tr32(GRC_MODE);
16361         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16362             tg3_asic_rev(tp) == ASIC_REV_5762)
16363                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16364                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16365                         GRC_MODE_B2HRX_ENABLE |
16366                         GRC_MODE_HTX2B_ENABLE |
16367                         GRC_MODE_HOST_STACKUP);
16368         else
16369                 val &= GRC_MODE_HOST_STACKUP;
16370
16371         tw32(GRC_MODE, val | tp->grc_mode);
16372
16373         tg3_switch_clocks(tp);
16374
16375         /* Clear this out for sanity. */
16376         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16377
16378         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16379                               &pci_state_reg);
16380         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16381             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16382                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16383                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16384                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16385                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16386                         void __iomem *sram_base;
16387
16388                         /* Write some dummy words into the SRAM status block
16389                          * area, see if it reads back correctly.  If the return
16390                          * value is bad, force enable the PCIX workaround.
16391                          */
16392                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16393
16394                         writel(0x00000000, sram_base);
16395                         writel(0x00000000, sram_base + 4);
16396                         writel(0xffffffff, sram_base + 4);
16397                         if (readl(sram_base) != 0x00000000)
16398                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16399                 }
16400         }
16401
16402         udelay(50);
16403         tg3_nvram_init(tp);
16404
16405         /* If the device has an NVRAM, no need to load patch firmware */
16406         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16407             !tg3_flag(tp, NO_NVRAM))
16408                 tp->fw_needed = NULL;
16409
16410         grc_misc_cfg = tr32(GRC_MISC_CFG);
16411         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16412
16413         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16414             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16415              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16416                 tg3_flag_set(tp, IS_5788);
16417
16418         if (!tg3_flag(tp, IS_5788) &&
16419             tg3_asic_rev(tp) != ASIC_REV_5700)
16420                 tg3_flag_set(tp, TAGGED_STATUS);
16421         if (tg3_flag(tp, TAGGED_STATUS)) {
16422                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16423                                       HOSTCC_MODE_CLRTICK_TXBD);
16424
16425                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16426                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16427                                        tp->misc_host_ctrl);
16428         }
16429
16430         /* Preserve the APE MAC_MODE bits */
16431         if (tg3_flag(tp, ENABLE_APE))
16432                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16433         else
16434                 tp->mac_mode = 0;
16435
16436         if (tg3_10_100_only_device(tp, ent))
16437                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16438
16439         err = tg3_phy_probe(tp);
16440         if (err) {
16441                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16442                 /* ... but do not return immediately ... */
16443                 tg3_mdio_fini(tp);
16444         }
16445
16446         tg3_read_vpd(tp);
16447         tg3_read_fw_ver(tp);
16448
16449         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16450                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16451         } else {
16452                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16453                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16454                 else
16455                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16456         }
16457
16458         /* 5700 {AX,BX} chips have a broken status block link
16459          * change bit implementation, so we must use the
16460          * status register in those cases.
16461          */
16462         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16463                 tg3_flag_set(tp, USE_LINKCHG_REG);
16464         else
16465                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16466
16467         /* The led_ctrl is set during tg3_phy_probe, here we might
16468          * have to force the link status polling mechanism based
16469          * upon subsystem IDs.
16470          */
16471         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16472             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16473             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16474                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16475                 tg3_flag_set(tp, USE_LINKCHG_REG);
16476         }
16477
16478         /* For all SERDES we poll the MAC status register. */
16479         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16480                 tg3_flag_set(tp, POLL_SERDES);
16481         else
16482                 tg3_flag_clear(tp, POLL_SERDES);
16483
16484         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16485         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16486         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16487             tg3_flag(tp, PCIX_MODE)) {
16488                 tp->rx_offset = NET_SKB_PAD;
16489 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16490                 tp->rx_copy_thresh = ~(u16)0;
16491 #endif
16492         }
16493
16494         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16495         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16496         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16497
16498         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16499
16500         /* Increment the rx prod index on the rx std ring by at most
16501          * 8 for these chips to workaround hw errata.
16502          */
16503         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16504             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16505             tg3_asic_rev(tp) == ASIC_REV_5755)
16506                 tp->rx_std_max_post = 8;
16507
16508         if (tg3_flag(tp, ASPM_WORKAROUND))
16509                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16510                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16511
16512         return err;
16513 }
16514
16515 #ifdef CONFIG_SPARC
16516 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16517 {
16518         struct net_device *dev = tp->dev;
16519         struct pci_dev *pdev = tp->pdev;
16520         struct device_node *dp = pci_device_to_OF_node(pdev);
16521         const unsigned char *addr;
16522         int len;
16523
16524         addr = of_get_property(dp, "local-mac-address", &len);
16525         if (addr && len == 6) {
16526                 memcpy(dev->dev_addr, addr, 6);
16527                 return 0;
16528         }
16529         return -ENODEV;
16530 }
16531
16532 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16533 {
16534         struct net_device *dev = tp->dev;
16535
16536         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16537         return 0;
16538 }
16539 #endif
16540
/* Determine the device's permanent MAC address and store it in
 * dev->dev_addr.  Sources are tried in decreasing order of trust:
 * OF property (sparc), SSB core helper, the SRAM mailbox written by
 * bootcode, NVRAM, and finally the live MAC address registers.
 *
 * Returns 0 on success, -EINVAL if no valid address could be found.
 */
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	/* Firmware-provided "local-mac-address" takes precedence over
	 * anything stored in the chip.
	 */
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	/* Select the NVRAM offset holding this function's MAC address. */
	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		/* Dual-MAC parts keep the second port's address at 0xcc. */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* If the NVRAM arbitration lock cannot be taken, reset the
		 * NVRAM state machine; otherwise just drop the lock again.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		/* 5717+ parts have up to four functions with per-function
		 * address slots.
		 */
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b ("HK") is the signature bootcode leaves in the upper
	 * half of the high mailbox word when the address is valid.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* Big-endian words: bytes 0-1 sit in the low half
			 * of hi, bytes 2-5 fill lo.
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
16622
16623 #define BOUNDARY_SINGLE_CACHELINE       1
16624 #define BOUNDARY_MULTI_CACHELINE        2
16625
16626 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16627 {
16628         int cacheline_size;
16629         u8 byte;
16630         int goal;
16631
16632         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16633         if (byte == 0)
16634                 cacheline_size = 1024;
16635         else
16636                 cacheline_size = (int) byte * 4;
16637
16638         /* On 5703 and later chips, the boundary bits have no
16639          * effect.
16640          */
16641         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16642             tg3_asic_rev(tp) != ASIC_REV_5701 &&
16643             !tg3_flag(tp, PCI_EXPRESS))
16644                 goto out;
16645
16646 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16647         goal = BOUNDARY_MULTI_CACHELINE;
16648 #else
16649 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16650         goal = BOUNDARY_SINGLE_CACHELINE;
16651 #else
16652         goal = 0;
16653 #endif
16654 #endif
16655
16656         if (tg3_flag(tp, 57765_PLUS)) {
16657                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16658                 goto out;
16659         }
16660
16661         if (!goal)
16662                 goto out;
16663
16664         /* PCI controllers on most RISC systems tend to disconnect
16665          * when a device tries to burst across a cache-line boundary.
16666          * Therefore, letting tg3 do so just wastes PCI bandwidth.
16667          *
16668          * Unfortunately, for PCI-E there are only limited
16669          * write-side controls for this, and thus for reads
16670          * we will still get the disconnects.  We'll also waste
16671          * these PCI cycles for both read and write for chips
16672          * other than 5700 and 5701 which do not implement the
16673          * boundary bits.
16674          */
16675         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16676                 switch (cacheline_size) {
16677                 case 16:
16678                 case 32:
16679                 case 64:
16680                 case 128:
16681                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16682                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16683                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16684                         } else {
16685                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16686                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16687                         }
16688                         break;
16689
16690                 case 256:
16691                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16692                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16693                         break;
16694
16695                 default:
16696                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16697                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16698                         break;
16699                 }
16700         } else if (tg3_flag(tp, PCI_EXPRESS)) {
16701                 switch (cacheline_size) {
16702                 case 16:
16703                 case 32:
16704                 case 64:
16705                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16706                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16707                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16708                                 break;
16709                         }
16710                         /* fallthrough */
16711                 case 128:
16712                 default:
16713                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16714                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16715                         break;
16716                 }
16717         } else {
16718                 switch (cacheline_size) {
16719                 case 16:
16720                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16721                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16722                                         DMA_RWCTRL_WRITE_BNDRY_16);
16723                                 break;
16724                         }
16725                         /* fallthrough */
16726                 case 32:
16727                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16728                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16729                                         DMA_RWCTRL_WRITE_BNDRY_32);
16730                                 break;
16731                         }
16732                         /* fallthrough */
16733                 case 64:
16734                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16735                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16736                                         DMA_RWCTRL_WRITE_BNDRY_64);
16737                                 break;
16738                         }
16739                         /* fallthrough */
16740                 case 128:
16741                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16742                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16743                                         DMA_RWCTRL_WRITE_BNDRY_128);
16744                                 break;
16745                         }
16746                         /* fallthrough */
16747                 case 256:
16748                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
16749                                 DMA_RWCTRL_WRITE_BNDRY_256);
16750                         break;
16751                 case 512:
16752                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
16753                                 DMA_RWCTRL_WRITE_BNDRY_512);
16754                         break;
16755                 case 1024:
16756                 default:
16757                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16758                                 DMA_RWCTRL_WRITE_BNDRY_1024);
16759                         break;
16760                 }
16761         }
16762
16763 out:
16764         return val;
16765 }
16766
/* Run one DMA transfer of @size bytes between the host buffer at
 * @buf_dma and NIC SRAM using a hand-built internal descriptor:
 * host -> card when @to_device is true, card -> host otherwise.
 *
 * Returns 0 when the completion FIFO reports the descriptor finished
 * within the poll window, -ENODEV on timeout.
 */
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA engines: clear completion FIFOs and status. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build the descriptor: host address, fixed NIC-side mbuf
	 * offset 0x2100, and transfer length.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Poke the descriptor into NIC SRAM one dword at a time through
	 * the config-space memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the completion FIFO for up to 40 * 100us = 4ms. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
16847
16848 #define TEST_BUFFER_SIZE        0x2000
16849
16850 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16851         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16852         { },
16853 };
16854
/* Configure tp->dma_rwctrl for this chip/bus combination and, on
 * 5700/5701, run a write-then-read DMA loopback test to detect the
 * write-DMA hardware bug, tightening the write boundary to 16 bytes
 * if corruption is observed (or if a known-bad host bridge is present).
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, or a negative error from the DMA test itself.
 */
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Baseline PCI read/write command codes, then fold in the
	 * boundary bits computed from bus type and cache line size.
	 */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	/* Per-bus-type watermark settings. */
	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	/* 5703/5704: clear the low nibble (boundary bits repurposed). */
	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the actual loopback test below. */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: retried once after tightening the boundary to 16 bytes;
	 * exits on success, unrecoverable corruption, or DMA failure.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			/* First corruption: tighten the write boundary and
			 * retry; if already at 16 bytes, give up.
			 */
			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
17046
17047 static void tg3_init_bufmgr_config(struct tg3 *tp)
17048 {
17049         if (tg3_flag(tp, 57765_PLUS)) {
17050                 tp->bufmgr_config.mbuf_read_dma_low_water =
17051                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17052                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17053                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17054                 tp->bufmgr_config.mbuf_high_water =
17055                         DEFAULT_MB_HIGH_WATER_57765;
17056
17057                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17058                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17059                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17060                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17061                 tp->bufmgr_config.mbuf_high_water_jumbo =
17062                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17063         } else if (tg3_flag(tp, 5705_PLUS)) {
17064                 tp->bufmgr_config.mbuf_read_dma_low_water =
17065                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17066                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17067                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17068                 tp->bufmgr_config.mbuf_high_water =
17069                         DEFAULT_MB_HIGH_WATER_5705;
17070                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17071                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17072                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17073                         tp->bufmgr_config.mbuf_high_water =
17074                                 DEFAULT_MB_HIGH_WATER_5906;
17075                 }
17076
17077                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17078                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17079                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17080                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17081                 tp->bufmgr_config.mbuf_high_water_jumbo =
17082                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17083         } else {
17084                 tp->bufmgr_config.mbuf_read_dma_low_water =
17085                         DEFAULT_MB_RDMA_LOW_WATER;
17086                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17087                         DEFAULT_MB_MACRX_LOW_WATER;
17088                 tp->bufmgr_config.mbuf_high_water =
17089                         DEFAULT_MB_HIGH_WATER;
17090
17091                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17092                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17093                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17094                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17095                 tp->bufmgr_config.mbuf_high_water_jumbo =
17096                         DEFAULT_MB_HIGH_WATER_JUMBO;
17097         }
17098
17099         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17100         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17101 }
17102
17103 static char *tg3_phy_string(struct tg3 *tp)
17104 {
17105         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17106         case TG3_PHY_ID_BCM5400:        return "5400";
17107         case TG3_PHY_ID_BCM5401:        return "5401";
17108         case TG3_PHY_ID_BCM5411:        return "5411";
17109         case TG3_PHY_ID_BCM5701:        return "5701";
17110         case TG3_PHY_ID_BCM5703:        return "5703";
17111         case TG3_PHY_ID_BCM5704:        return "5704";
17112         case TG3_PHY_ID_BCM5705:        return "5705";
17113         case TG3_PHY_ID_BCM5750:        return "5750";
17114         case TG3_PHY_ID_BCM5752:        return "5752";
17115         case TG3_PHY_ID_BCM5714:        return "5714";
17116         case TG3_PHY_ID_BCM5780:        return "5780";
17117         case TG3_PHY_ID_BCM5755:        return "5755";
17118         case TG3_PHY_ID_BCM5787:        return "5787";
17119         case TG3_PHY_ID_BCM5784:        return "5784";
17120         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17121         case TG3_PHY_ID_BCM5906:        return "5906";
17122         case TG3_PHY_ID_BCM5761:        return "5761";
17123         case TG3_PHY_ID_BCM5718C:       return "5718C";
17124         case TG3_PHY_ID_BCM5718S:       return "5718S";
17125         case TG3_PHY_ID_BCM57765:       return "57765";
17126         case TG3_PHY_ID_BCM5719C:       return "5719C";
17127         case TG3_PHY_ID_BCM5720C:       return "5720C";
17128         case TG3_PHY_ID_BCM5762:        return "5762C";
17129         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17130         case 0:                 return "serdes";
17131         default:                return "unknown";
17132         }
17133 }
17134
17135 static char *tg3_bus_string(struct tg3 *tp, char *str)
17136 {
17137         if (tg3_flag(tp, PCI_EXPRESS)) {
17138                 strcpy(str, "PCI Express");
17139                 return str;
17140         } else if (tg3_flag(tp, PCIX_MODE)) {
17141                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17142
17143                 strcpy(str, "PCIX:");
17144
17145                 if ((clock_ctrl == 7) ||
17146                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17147                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17148                         strcat(str, "133MHz");
17149                 else if (clock_ctrl == 0)
17150                         strcat(str, "33MHz");
17151                 else if (clock_ctrl == 2)
17152                         strcat(str, "50MHz");
17153                 else if (clock_ctrl == 4)
17154                         strcat(str, "66MHz");
17155                 else if (clock_ctrl == 6)
17156                         strcat(str, "100MHz");
17157         } else {
17158                 strcpy(str, "PCI:");
17159                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17160                         strcat(str, "66MHz");
17161                 else
17162                         strcat(str, "33MHz");
17163         }
17164         if (tg3_flag(tp, PCI_32BIT))
17165                 strcat(str, ":32-bit");
17166         else
17167                 strcat(str, ":64-bit");
17168         return str;
17169 }
17170
17171 static void tg3_init_coal(struct tg3 *tp)
17172 {
17173         struct ethtool_coalesce *ec = &tp->coal;
17174
17175         memset(ec, 0, sizeof(*ec));
17176         ec->cmd = ETHTOOL_GCOALESCE;
17177         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17178         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17179         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17180         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17181         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17182         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17183         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17184         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17185         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17186
17187         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17188                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17189                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17190                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17191                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17192                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17193         }
17194
17195         if (tg3_flag(tp, 5705_PLUS)) {
17196                 ec->rx_coalesce_usecs_irq = 0;
17197                 ec->tx_coalesce_usecs_irq = 0;
17198                 ec->stats_block_coalesce_usecs = 0;
17199         }
17200 }
17201
/* PCI probe entry point.  Enables the device, maps its register BARs,
 * detects chip capabilities, configures DMA masks and netdev features,
 * sets up the per-vector interrupt mailboxes, and registers the net
 * device.  Returns 0 on success or a negative errno; all resources
 * acquired before a failure are released via the goto unwind at the
 * bottom.
 */
static int tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* The netdev private area holds struct tg3; one tx/rx queue pair
	 * per possible MSI-X vector.
	 */
	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pdev->pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	/* Interrupts remain logically disabled until the device is opened. */
	tp->irq_sync = 1;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* SSB GigE cores (embedded variants) need several behavioral quirks. */
	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev))
			tg3_flag_set(tp, ROBOSWITCH);
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	/* Devices with an APE (Application Processing Engine) expose its
	 * register block through BAR 2; map it as well.
	 */
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	/* Probe chip revision, capabilities, and quirk flags; most of the
	 * decisions below depend on the flags this sets.
	 */
	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	/* Fall back to a 32-bit streaming mask if the wide mask failed. */
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	/* Assign interrupt, rx-return, and tx-producer mailbox registers
	 * to each NAPI context.
	 */
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		/* The first interrupt mailboxes are 8 bytes apart; the
		 * remainder are packed 4 bytes apart.
		 */
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

	/* Error unwind: labels release resources in reverse order of
	 * acquisition; later labels fall through to earlier ones.
	 */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
17586
17587 static void tg3_remove_one(struct pci_dev *pdev)
17588 {
17589         struct net_device *dev = pci_get_drvdata(pdev);
17590
17591         if (dev) {
17592                 struct tg3 *tp = netdev_priv(dev);
17593
17594                 release_firmware(tp->fw);
17595
17596                 tg3_reset_task_cancel(tp);
17597
17598                 if (tg3_flag(tp, USE_PHYLIB)) {
17599                         tg3_phy_fini(tp);
17600                         tg3_mdio_fini(tp);
17601                 }
17602
17603                 unregister_netdev(dev);
17604                 if (tp->aperegs) {
17605                         iounmap(tp->aperegs);
17606                         tp->aperegs = NULL;
17607                 }
17608                 if (tp->regs) {
17609                         iounmap(tp->regs);
17610                         tp->regs = NULL;
17611                 }
17612                 free_netdev(dev);
17613                 pci_release_regions(pdev);
17614                 pci_disable_device(pdev);
17615                 pci_set_drvdata(pdev, NULL);
17616         }
17617 }
17618
17619 #ifdef CONFIG_PM_SLEEP
/* PM sleep callback: quiesce the NIC ahead of system suspend.
 *
 * If preparing for power-down fails, the hardware is restarted so the
 * interface is left usable, and the original error is still returned.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		/* Recovery path: bring the hardware back up so the device
		 * keeps working even though suspend is being aborted.
		 */
		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* Restart the PHY outside the lock, only if hw came back. */
		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
17672
/* PM sleep callback: reinitialize the hardware after system resume.
 * Returns 0 on success or the tg3_restart_hw() error.
 */
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to restore if the interface was down at suspend time. */
	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	/* Skip the PHY reset when the link was kept up across power-down. */
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* PHY restart must happen outside the full lock. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
17707 #endif /* CONFIG_PM_SLEEP */
17708
17709 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17710
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 *
 * Returns %PCI_ERS_RESULT_DISCONNECT on permanent failure, otherwise
 * %PCI_ERS_RESULT_NEED_RESET to request a slot reset.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
17759
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 *
 * Returns %PCI_ERS_RESULT_RECOVERED when the device is usable again,
 * %PCI_ERS_RESULT_DISCONNECT otherwise.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	/* If the interface is down there is no hardware state to rebuild. */
	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
17803
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	/* PHY restart must happen outside the full lock. */
	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
17845
/* PCI Advanced Error Reporting recovery callbacks (see the three
 * tg3_io_* handlers above).
 */
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
17851
/* PCI driver glue: probe/remove entry points, power-management ops,
 * and AER error handlers.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
};
17860
17861 module_pci_driver(tg3_driver);