2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2016 Broadcom Corporation.
8 * Copyright (C) 2016-2017 Broadcom Limited.
11 * Derived from proprietary unpublished source code,
12 * Copyright (C) 2000-2016 Broadcom Corporation.
13 * Copyright (C) 2016-2017 Broadcom Ltd.
15 * Permission is hereby granted for the distribution of this firmware
16 * data in hexadecimal or equivalent format, provided this copyright
17 * notice is accompanying it.
21 #include <linux/module.h>
22 #include <linux/moduleparam.h>
23 #include <linux/stringify.h>
24 #include <linux/kernel.h>
25 #include <linux/sched/signal.h>
26 #include <linux/types.h>
27 #include <linux/compiler.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/ioport.h>
33 #include <linux/pci.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/skbuff.h>
37 #include <linux/ethtool.h>
38 #include <linux/mdio.h>
39 #include <linux/mii.h>
40 #include <linux/phy.h>
41 #include <linux/brcmphy.h>
43 #include <linux/if_vlan.h>
45 #include <linux/tcp.h>
46 #include <linux/workqueue.h>
47 #include <linux/prefetch.h>
48 #include <linux/dma-mapping.h>
49 #include <linux/firmware.h>
50 #include <linux/ssb/ssb_driver_gige.h>
51 #include <linux/hwmon.h>
52 #include <linux/hwmon-sysfs.h>
54 #include <net/checksum.h>
58 #include <asm/byteorder.h>
59 #include <linux/uaccess.h>
61 #include <uapi/linux/net_tstamp.h>
62 #include <linux/ptp_clock_kernel.h>
65 #include <asm/idprom.h>
74 /* Functions & macros to verify TG3_FLAGS types */
76 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
78 return test_bit(flag, bits);
81 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
86 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
88 clear_bit(flag, bits);
91 #define tg3_flag(tp, flag) \
92 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
93 #define tg3_flag_set(tp, flag) \
94 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
95 #define tg3_flag_clear(tp, flag) \
96 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
98 #define DRV_MODULE_NAME "tg3"
100 #define TG3_MIN_NUM 137
101 #define DRV_MODULE_VERSION \
102 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
103 #define DRV_MODULE_RELDATE "May 11, 2014"
105 #define RESET_KIND_SHUTDOWN 0
106 #define RESET_KIND_INIT 1
107 #define RESET_KIND_SUSPEND 2
109 #define TG3_DEF_RX_MODE 0
110 #define TG3_DEF_TX_MODE 0
111 #define TG3_DEF_MSG_ENABLE \
121 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
123 /* length of time before we decide the hardware is borked,
124 * and dev->tx_timeout() should be called to fix the problem
127 #define TG3_TX_TIMEOUT (5 * HZ)
129 /* hardware minimum and maximum for a single frame's data payload */
130 #define TG3_MIN_MTU ETH_ZLEN
131 #define TG3_MAX_MTU(tp) \
132 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
134 /* These numbers seem to be hard coded in the NIC firmware somehow.
135 * You can't change the ring sizes, but you can change where you place
136 * them in the NIC onboard memory.
138 #define TG3_RX_STD_RING_SIZE(tp) \
139 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
140 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
141 #define TG3_DEF_RX_RING_PENDING 200
142 #define TG3_RX_JMB_RING_SIZE(tp) \
143 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
144 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
145 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
147 /* Do not place this n-ring entries value into the tp struct itself,
148 * we really want to expose these constants to GCC so that modulo et
149 * al. operations are done with shifts and masks instead of with
150 * hw multiply/modulo instructions. Another solution would be to
151 * replace things like '% foo' with '& (foo - 1)'.
154 #define TG3_TX_RING_SIZE 512
155 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
157 #define TG3_RX_STD_RING_BYTES(tp) \
158 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
159 #define TG3_RX_JMB_RING_BYTES(tp) \
160 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
161 #define TG3_RX_RCB_RING_BYTES(tp) \
162 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
163 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
165 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
167 #define TG3_DMA_BYTE_ENAB 64
169 #define TG3_RX_STD_DMA_SZ 1536
170 #define TG3_RX_JMB_DMA_SZ 9046
172 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
174 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
175 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
177 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
178 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
180 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
181 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
183 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
184 * that are at least dword aligned when used in PCIX mode. The driver
185 * works around this bug by double copying the packet. This workaround
186 * is built into the normal double copy length check for efficiency.
188 * However, the double copy is only necessary on those architectures
189 * where unaligned memory accesses are inefficient. For those architectures
190 * where unaligned memory accesses incur little penalty, we can reintegrate
191 * the 5701 in the normal rx path. Doing so saves a device structure
192 * dereference by hardcoding the double copy threshold in place.
194 #define TG3_RX_COPY_THRESHOLD 256
195 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
196 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
198 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
201 #if (NET_IP_ALIGN != 0)
202 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
204 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
207 /* minimum number of free TX descriptors required to wake up TX process */
208 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
209 #define TG3_TX_BD_DMA_MAX_2K 2048
210 #define TG3_TX_BD_DMA_MAX_4K 4096
212 #define TG3_RAW_IP_ALIGN 2
214 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
215 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
217 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
218 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
220 #define FIRMWARE_TG3 "tigon/tg3.bin"
221 #define FIRMWARE_TG357766 "tigon/tg357766.bin"
222 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
223 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
225 static char version[] =
226 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
228 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
229 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
230 MODULE_LICENSE("GPL");
231 MODULE_VERSION(DRV_MODULE_VERSION);
232 MODULE_FIRMWARE(FIRMWARE_TG3);
233 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
234 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
236 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
237 module_param(tg3_debug, int, 0);
238 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
240 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
241 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
243 static const struct pci_device_id tg3_pci_tbl[] = {
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
263 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
264 TG3_DRV_DATA_FLAG_5705_10_100},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
266 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
267 TG3_DRV_DATA_FLAG_5705_10_100},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
270 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
271 TG3_DRV_DATA_FLAG_5705_10_100},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
278 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
284 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
292 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
293 PCI_VENDOR_ID_LENOVO,
294 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
295 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
298 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
311 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
312 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
313 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
314 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
315 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
316 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
317 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
318 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
319 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
320 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
321 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
322 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
326 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
330 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
332 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
336 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
338 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
340 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
341 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
342 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
343 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
344 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
345 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
346 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
347 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
348 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
349 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
350 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
351 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
352 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
353 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
354 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
355 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
356 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
357 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
358 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
362 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
364 static const struct {
365 const char string[ETH_GSTRING_LEN];
366 } ethtool_stats_keys[] = {
369 { "rx_ucast_packets" },
370 { "rx_mcast_packets" },
371 { "rx_bcast_packets" },
373 { "rx_align_errors" },
374 { "rx_xon_pause_rcvd" },
375 { "rx_xoff_pause_rcvd" },
376 { "rx_mac_ctrl_rcvd" },
377 { "rx_xoff_entered" },
378 { "rx_frame_too_long_errors" },
380 { "rx_undersize_packets" },
381 { "rx_in_length_errors" },
382 { "rx_out_length_errors" },
383 { "rx_64_or_less_octet_packets" },
384 { "rx_65_to_127_octet_packets" },
385 { "rx_128_to_255_octet_packets" },
386 { "rx_256_to_511_octet_packets" },
387 { "rx_512_to_1023_octet_packets" },
388 { "rx_1024_to_1522_octet_packets" },
389 { "rx_1523_to_2047_octet_packets" },
390 { "rx_2048_to_4095_octet_packets" },
391 { "rx_4096_to_8191_octet_packets" },
392 { "rx_8192_to_9022_octet_packets" },
399 { "tx_flow_control" },
401 { "tx_single_collisions" },
402 { "tx_mult_collisions" },
404 { "tx_excessive_collisions" },
405 { "tx_late_collisions" },
406 { "tx_collide_2times" },
407 { "tx_collide_3times" },
408 { "tx_collide_4times" },
409 { "tx_collide_5times" },
410 { "tx_collide_6times" },
411 { "tx_collide_7times" },
412 { "tx_collide_8times" },
413 { "tx_collide_9times" },
414 { "tx_collide_10times" },
415 { "tx_collide_11times" },
416 { "tx_collide_12times" },
417 { "tx_collide_13times" },
418 { "tx_collide_14times" },
419 { "tx_collide_15times" },
420 { "tx_ucast_packets" },
421 { "tx_mcast_packets" },
422 { "tx_bcast_packets" },
423 { "tx_carrier_sense_errors" },
427 { "dma_writeq_full" },
428 { "dma_write_prioq_full" },
432 { "rx_threshold_hit" },
434 { "dma_readq_full" },
435 { "dma_read_prioq_full" },
436 { "tx_comp_queue_full" },
438 { "ring_set_send_prod_index" },
439 { "ring_status_update" },
441 { "nic_avoided_irqs" },
442 { "nic_tx_threshold_hit" },
444 { "mbuf_lwm_thresh_hit" },
447 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
448 #define TG3_NVRAM_TEST 0
449 #define TG3_LINK_TEST 1
450 #define TG3_REGISTER_TEST 2
451 #define TG3_MEMORY_TEST 3
452 #define TG3_MAC_LOOPB_TEST 4
453 #define TG3_PHY_LOOPB_TEST 5
454 #define TG3_EXT_LOOPB_TEST 6
455 #define TG3_INTERRUPT_TEST 7
458 static const struct {
459 const char string[ETH_GSTRING_LEN];
460 } ethtool_test_keys[] = {
461 [TG3_NVRAM_TEST] = { "nvram test (online) " },
462 [TG3_LINK_TEST] = { "link test (online) " },
463 [TG3_REGISTER_TEST] = { "register test (offline)" },
464 [TG3_MEMORY_TEST] = { "memory test (offline)" },
465 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
466 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
467 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
468 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
471 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
474 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
476 writel(val, tp->regs + off);
479 static u32 tg3_read32(struct tg3 *tp, u32 off)
481 return readl(tp->regs + off);
484 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
486 writel(val, tp->aperegs + off);
489 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
491 return readl(tp->aperegs + off);
494 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
498 spin_lock_irqsave(&tp->indirect_lock, flags);
499 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
500 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
501 spin_unlock_irqrestore(&tp->indirect_lock, flags);
504 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
506 writel(val, tp->regs + off);
507 readl(tp->regs + off);
510 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
515 spin_lock_irqsave(&tp->indirect_lock, flags);
516 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
517 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
518 spin_unlock_irqrestore(&tp->indirect_lock, flags);
522 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
526 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
527 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
528 TG3_64BIT_REG_LOW, val);
531 if (off == TG3_RX_STD_PROD_IDX_REG) {
532 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
533 TG3_64BIT_REG_LOW, val);
537 spin_lock_irqsave(&tp->indirect_lock, flags);
538 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
539 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
540 spin_unlock_irqrestore(&tp->indirect_lock, flags);
542 /* In indirect mode when disabling interrupts, we also need
543 * to clear the interrupt bit in the GRC local ctrl register.
545 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
547 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
548 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
552 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
557 spin_lock_irqsave(&tp->indirect_lock, flags);
558 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
559 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
560 spin_unlock_irqrestore(&tp->indirect_lock, flags);
564 /* usec_wait specifies the wait time in usec when writing to certain registers
565 * where it is unsafe to read back the register without some delay.
566 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
567 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
569 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
571 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
572 /* Non-posted methods */
573 tp->write32(tp, off, val);
576 tg3_write32(tp, off, val);
581 /* Wait again after the read for the posted method to guarantee that
582 * the wait time is met.
588 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
590 tp->write32_mbox(tp, off, val);
591 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
592 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
593 !tg3_flag(tp, ICH_WORKAROUND)))
594 tp->read32_mbox(tp, off);
597 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
599 void __iomem *mbox = tp->regs + off;
601 if (tg3_flag(tp, TXD_MBOX_HWBUG))
603 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
604 tg3_flag(tp, FLUSH_POSTED_WRITES))
608 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
610 return readl(tp->regs + off + GRCMBOX_BASE);
613 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
615 writel(val, tp->regs + off + GRCMBOX_BASE);
618 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
619 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
620 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
621 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
622 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
624 #define tw32(reg, val) tp->write32(tp, reg, val)
625 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
626 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
627 #define tr32(reg) tp->read32(tp, reg)
629 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
633 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
634 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
637 spin_lock_irqsave(&tp->indirect_lock, flags);
638 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
639 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
640 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
642 /* Always leave this as zero. */
643 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
645 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
646 tw32_f(TG3PCI_MEM_WIN_DATA, val);
648 /* Always leave this as zero. */
649 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
651 spin_unlock_irqrestore(&tp->indirect_lock, flags);
654 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
658 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
659 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
664 spin_lock_irqsave(&tp->indirect_lock, flags);
665 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
666 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
667 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
669 /* Always leave this as zero. */
670 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
672 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
673 *val = tr32(TG3PCI_MEM_WIN_DATA);
675 /* Always leave this as zero. */
676 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
678 spin_unlock_irqrestore(&tp->indirect_lock, flags);
681 static void tg3_ape_lock_init(struct tg3 *tp)
686 if (tg3_asic_rev(tp) == ASIC_REV_5761)
687 regbase = TG3_APE_LOCK_GRANT;
689 regbase = TG3_APE_PER_LOCK_GRANT;
691 /* Make sure the driver hasn't any stale locks. */
692 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
694 case TG3_APE_LOCK_PHY0:
695 case TG3_APE_LOCK_PHY1:
696 case TG3_APE_LOCK_PHY2:
697 case TG3_APE_LOCK_PHY3:
698 bit = APE_LOCK_GRANT_DRIVER;
702 bit = APE_LOCK_GRANT_DRIVER;
704 bit = 1 << tp->pci_fn;
706 tg3_ape_write32(tp, regbase + 4 * i, bit);
711 static int tg3_ape_lock(struct tg3 *tp, int locknum)
715 u32 status, req, gnt, bit;
717 if (!tg3_flag(tp, ENABLE_APE))
721 case TG3_APE_LOCK_GPIO:
722 if (tg3_asic_rev(tp) == ASIC_REV_5761)
724 case TG3_APE_LOCK_GRC:
725 case TG3_APE_LOCK_MEM:
727 bit = APE_LOCK_REQ_DRIVER;
729 bit = 1 << tp->pci_fn;
731 case TG3_APE_LOCK_PHY0:
732 case TG3_APE_LOCK_PHY1:
733 case TG3_APE_LOCK_PHY2:
734 case TG3_APE_LOCK_PHY3:
735 bit = APE_LOCK_REQ_DRIVER;
741 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
742 req = TG3_APE_LOCK_REQ;
743 gnt = TG3_APE_LOCK_GRANT;
745 req = TG3_APE_PER_LOCK_REQ;
746 gnt = TG3_APE_PER_LOCK_GRANT;
751 tg3_ape_write32(tp, req + off, bit);
753 /* Wait for up to 1 millisecond to acquire lock. */
754 for (i = 0; i < 100; i++) {
755 status = tg3_ape_read32(tp, gnt + off);
758 if (pci_channel_offline(tp->pdev))
765 /* Revoke the lock request. */
766 tg3_ape_write32(tp, gnt + off, bit);
773 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
777 if (!tg3_flag(tp, ENABLE_APE))
781 case TG3_APE_LOCK_GPIO:
782 if (tg3_asic_rev(tp) == ASIC_REV_5761)
784 case TG3_APE_LOCK_GRC:
785 case TG3_APE_LOCK_MEM:
787 bit = APE_LOCK_GRANT_DRIVER;
789 bit = 1 << tp->pci_fn;
791 case TG3_APE_LOCK_PHY0:
792 case TG3_APE_LOCK_PHY1:
793 case TG3_APE_LOCK_PHY2:
794 case TG3_APE_LOCK_PHY3:
795 bit = APE_LOCK_GRANT_DRIVER;
801 if (tg3_asic_rev(tp) == ASIC_REV_5761)
802 gnt = TG3_APE_LOCK_GRANT;
804 gnt = TG3_APE_PER_LOCK_GRANT;
806 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
809 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
814 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
817 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
818 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
821 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
824 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
827 return timeout_us ? 0 : -EBUSY;
830 #ifdef CONFIG_TIGON3_HWMON
831 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
835 for (i = 0; i < timeout_us / 10; i++) {
836 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
838 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
844 return i == timeout_us / 10;
847 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
851 u32 i, bufoff, msgoff, maxlen, apedata;
853 if (!tg3_flag(tp, APE_HAS_NCSI))
856 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
857 if (apedata != APE_SEG_SIG_MAGIC)
860 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
861 if (!(apedata & APE_FW_STATUS_READY))
864 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
866 msgoff = bufoff + 2 * sizeof(u32);
867 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
872 /* Cap xfer sizes to scratchpad limits. */
873 length = (len > maxlen) ? maxlen : len;
876 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
877 if (!(apedata & APE_FW_STATUS_READY))
880 /* Wait for up to 1 msec for APE to service previous event. */
881 err = tg3_ape_event_lock(tp, 1000);
885 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
886 APE_EVENT_STATUS_SCRTCHPD_READ |
887 APE_EVENT_STATUS_EVENT_PENDING;
888 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
890 tg3_ape_write32(tp, bufoff, base_off);
891 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
893 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
894 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
898 if (tg3_ape_wait_for_event(tp, 30000))
901 for (i = 0; length; i += 4, length -= 4) {
902 u32 val = tg3_ape_read32(tp, msgoff + i);
903 memcpy(data, &val, sizeof(u32));
912 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
917 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
918 if (apedata != APE_SEG_SIG_MAGIC)
921 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
922 if (!(apedata & APE_FW_STATUS_READY))
925 /* Wait for up to 20 millisecond for APE to service previous event. */
926 err = tg3_ape_event_lock(tp, 20000);
930 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
931 event | APE_EVENT_STATUS_EVENT_PENDING);
933 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
934 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
939 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
944 if (!tg3_flag(tp, ENABLE_APE))
948 case RESET_KIND_INIT:
949 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
950 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
951 APE_HOST_SEG_SIG_MAGIC);
952 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
953 APE_HOST_SEG_LEN_MAGIC);
954 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
955 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
956 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
957 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
958 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
959 APE_HOST_BEHAV_NO_PHYLOCK);
960 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
961 TG3_APE_HOST_DRVR_STATE_START);
963 event = APE_EVENT_STATUS_STATE_START;
965 case RESET_KIND_SHUTDOWN:
966 if (device_may_wakeup(&tp->pdev->dev) &&
967 tg3_flag(tp, WOL_ENABLE)) {
968 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
969 TG3_APE_HOST_WOL_SPEED_AUTO);
970 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
972 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
974 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
976 event = APE_EVENT_STATUS_STATE_UNLOAD;
982 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
984 tg3_ape_send_event(tp, event);
987 static void tg3_send_ape_heartbeat(struct tg3 *tp,
988 unsigned long interval)
990 /* Check if hb interval has exceeded */
991 if (!tg3_flag(tp, ENABLE_APE) ||
992 time_before(jiffies, tp->ape_hb_jiffies + interval))
995 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
996 tp->ape_hb_jiffies = jiffies;
999 static void tg3_disable_ints(struct tg3 *tp)
1003 tw32(TG3PCI_MISC_HOST_CTRL,
1004 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1005 for (i = 0; i < tp->irq_max; i++)
1006 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1009 static void tg3_enable_ints(struct tg3 *tp)
1016 tw32(TG3PCI_MISC_HOST_CTRL,
1017 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1019 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1020 for (i = 0; i < tp->irq_cnt; i++) {
1021 struct tg3_napi *tnapi = &tp->napi[i];
1023 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1024 if (tg3_flag(tp, 1SHOT_MSI))
1025 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1027 tp->coal_now |= tnapi->coal_now;
1030 /* Force an initial interrupt */
1031 if (!tg3_flag(tp, TAGGED_STATUS) &&
1032 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1033 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1035 tw32(HOSTCC_MODE, tp->coal_now);
1037 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1040 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1042 struct tg3 *tp = tnapi->tp;
1043 struct tg3_hw_status *sblk = tnapi->hw_status;
1044 unsigned int work_exists = 0;
1046 /* check for phy events */
1047 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1048 if (sblk->status & SD_STATUS_LINK_CHG)
1052 /* check for TX work to do */
1053 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1056 /* check for RX work to do */
1057 if (tnapi->rx_rcb_prod_idx &&
1058 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1065 * similar to tg3_enable_ints, but it accurately determines whether there
1066 * is new work pending and can return without flushing the PIO write
1067 * which reenables interrupts
1069 static void tg3_int_reenable(struct tg3_napi *tnapi)
1071 struct tg3 *tp = tnapi->tp;
1073 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1076 /* When doing tagged status, this work check is unnecessary.
1077 * The last_tag we write above tells the chip which piece of
1078 * work we've completed.
1080 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1081 tw32(HOSTCC_MODE, tp->coalesce_mode |
1082 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1085 static void tg3_switch_clocks(struct tg3 *tp)
1088 u32 orig_clock_ctrl;
1090 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1093 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1095 orig_clock_ctrl = clock_ctrl;
1096 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1097 CLOCK_CTRL_CLKRUN_OENABLE |
1099 tp->pci_clock_ctrl = clock_ctrl;
1101 if (tg3_flag(tp, 5705_PLUS)) {
1102 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1103 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1104 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1106 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1107 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1109 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1111 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1112 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1115 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1118 #define PHY_BUSY_LOOPS 5000
1120 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1127 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1129 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1133 tg3_ape_lock(tp, tp->phy_ape_lock);
1137 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1138 MI_COM_PHY_ADDR_MASK);
1139 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1140 MI_COM_REG_ADDR_MASK);
1141 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1143 tw32_f(MAC_MI_COM, frame_val);
1145 loops = PHY_BUSY_LOOPS;
1146 while (loops != 0) {
1148 frame_val = tr32(MAC_MI_COM);
1150 if ((frame_val & MI_COM_BUSY) == 0) {
1152 frame_val = tr32(MAC_MI_COM);
1160 *val = frame_val & MI_COM_DATA_MASK;
1164 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1165 tw32_f(MAC_MI_MODE, tp->mi_mode);
1169 tg3_ape_unlock(tp, tp->phy_ape_lock);
1174 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1176 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1179 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1186 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1187 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1190 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1192 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1196 tg3_ape_lock(tp, tp->phy_ape_lock);
1198 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1199 MI_COM_PHY_ADDR_MASK);
1200 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1201 MI_COM_REG_ADDR_MASK);
1202 frame_val |= (val & MI_COM_DATA_MASK);
1203 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1205 tw32_f(MAC_MI_COM, frame_val);
1207 loops = PHY_BUSY_LOOPS;
1208 while (loops != 0) {
1210 frame_val = tr32(MAC_MI_COM);
1211 if ((frame_val & MI_COM_BUSY) == 0) {
1213 frame_val = tr32(MAC_MI_COM);
1223 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1224 tw32_f(MAC_MI_MODE, tp->mi_mode);
1228 tg3_ape_unlock(tp, tp->phy_ape_lock);
1233 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1235 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1238 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1242 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1246 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1250 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1251 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1255 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1261 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1265 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1269 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1273 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1274 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1278 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1284 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1288 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1290 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1295 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1299 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1301 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1306 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1310 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1311 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1312 MII_TG3_AUXCTL_SHDWSEL_MISC);
1314 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1319 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1321 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1322 set |= MII_TG3_AUXCTL_MISC_WREN;
1324 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1327 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1332 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1338 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1340 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1342 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1343 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1348 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1350 return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1351 reg | val | MII_TG3_MISC_SHDW_WREN);
1354 static int tg3_bmcr_reset(struct tg3 *tp)
1359 /* OK, reset it, and poll the BMCR_RESET bit until it
1360 * clears or we time out.
1362 phy_control = BMCR_RESET;
1363 err = tg3_writephy(tp, MII_BMCR, phy_control);
1369 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1373 if ((phy_control & BMCR_RESET) == 0) {
1385 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1387 struct tg3 *tp = bp->priv;
1390 spin_lock_bh(&tp->lock);
1392 if (__tg3_readphy(tp, mii_id, reg, &val))
1395 spin_unlock_bh(&tp->lock);
1400 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1402 struct tg3 *tp = bp->priv;
1405 spin_lock_bh(&tp->lock);
1407 if (__tg3_writephy(tp, mii_id, reg, val))
1410 spin_unlock_bh(&tp->lock);
1415 static void tg3_mdio_config_5785(struct tg3 *tp)
1418 struct phy_device *phydev;
1420 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1421 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1422 case PHY_ID_BCM50610:
1423 case PHY_ID_BCM50610M:
1424 val = MAC_PHYCFG2_50610_LED_MODES;
1426 case PHY_ID_BCMAC131:
1427 val = MAC_PHYCFG2_AC131_LED_MODES;
1429 case PHY_ID_RTL8211C:
1430 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1432 case PHY_ID_RTL8201E:
1433 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1439 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1440 tw32(MAC_PHYCFG2, val);
1442 val = tr32(MAC_PHYCFG1);
1443 val &= ~(MAC_PHYCFG1_RGMII_INT |
1444 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1445 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1446 tw32(MAC_PHYCFG1, val);
1451 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1452 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1453 MAC_PHYCFG2_FMODE_MASK_MASK |
1454 MAC_PHYCFG2_GMODE_MASK_MASK |
1455 MAC_PHYCFG2_ACT_MASK_MASK |
1456 MAC_PHYCFG2_QUAL_MASK_MASK |
1457 MAC_PHYCFG2_INBAND_ENABLE;
1459 tw32(MAC_PHYCFG2, val);
1461 val = tr32(MAC_PHYCFG1);
1462 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1463 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1464 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1465 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1466 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1467 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1468 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1470 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1471 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1472 tw32(MAC_PHYCFG1, val);
1474 val = tr32(MAC_EXT_RGMII_MODE);
1475 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1476 MAC_RGMII_MODE_RX_QUALITY |
1477 MAC_RGMII_MODE_RX_ACTIVITY |
1478 MAC_RGMII_MODE_RX_ENG_DET |
1479 MAC_RGMII_MODE_TX_ENABLE |
1480 MAC_RGMII_MODE_TX_LOWPWR |
1481 MAC_RGMII_MODE_TX_RESET);
1482 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1483 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1484 val |= MAC_RGMII_MODE_RX_INT_B |
1485 MAC_RGMII_MODE_RX_QUALITY |
1486 MAC_RGMII_MODE_RX_ACTIVITY |
1487 MAC_RGMII_MODE_RX_ENG_DET;
1488 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1489 val |= MAC_RGMII_MODE_TX_ENABLE |
1490 MAC_RGMII_MODE_TX_LOWPWR |
1491 MAC_RGMII_MODE_TX_RESET;
1493 tw32(MAC_EXT_RGMII_MODE, val);
1496 static void tg3_mdio_start(struct tg3 *tp)
1498 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1499 tw32_f(MAC_MI_MODE, tp->mi_mode);
1502 if (tg3_flag(tp, MDIOBUS_INITED) &&
1503 tg3_asic_rev(tp) == ASIC_REV_5785)
1504 tg3_mdio_config_5785(tp);
1507 static int tg3_mdio_init(struct tg3 *tp)
1511 struct phy_device *phydev;
1513 if (tg3_flag(tp, 5717_PLUS)) {
1516 tp->phy_addr = tp->pci_fn + 1;
1518 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1519 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1521 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1522 TG3_CPMU_PHY_STRAP_IS_SERDES;
1525 } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1528 addr = ssb_gige_get_phyaddr(tp->pdev);
1531 tp->phy_addr = addr;
1533 tp->phy_addr = TG3_PHY_MII_ADDR;
1537 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1540 tp->mdio_bus = mdiobus_alloc();
1541 if (tp->mdio_bus == NULL)
1544 tp->mdio_bus->name = "tg3 mdio bus";
1545 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1546 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1547 tp->mdio_bus->priv = tp;
1548 tp->mdio_bus->parent = &tp->pdev->dev;
1549 tp->mdio_bus->read = &tg3_mdio_read;
1550 tp->mdio_bus->write = &tg3_mdio_write;
1551 tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1553 /* The bus registration will look for all the PHYs on the mdio bus.
1554 * Unfortunately, it does not ensure the PHY is powered up before
1555 * accessing the PHY ID registers. A chip reset is the
1556 * quickest way to bring the device back to an operational state..
1558 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1561 i = mdiobus_register(tp->mdio_bus);
1563 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1564 mdiobus_free(tp->mdio_bus);
1568 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1570 if (!phydev || !phydev->drv) {
1571 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1572 mdiobus_unregister(tp->mdio_bus);
1573 mdiobus_free(tp->mdio_bus);
1577 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1578 case PHY_ID_BCM57780:
1579 phydev->interface = PHY_INTERFACE_MODE_GMII;
1580 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1582 case PHY_ID_BCM50610:
1583 case PHY_ID_BCM50610M:
1584 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1585 PHY_BRCM_RX_REFCLK_UNUSED |
1586 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1587 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1588 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1589 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1590 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1591 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1592 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1593 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1595 case PHY_ID_RTL8211C:
1596 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1598 case PHY_ID_RTL8201E:
1599 case PHY_ID_BCMAC131:
1600 phydev->interface = PHY_INTERFACE_MODE_MII;
1601 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1602 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1606 tg3_flag_set(tp, MDIOBUS_INITED);
1608 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1609 tg3_mdio_config_5785(tp);
1614 static void tg3_mdio_fini(struct tg3 *tp)
1616 if (tg3_flag(tp, MDIOBUS_INITED)) {
1617 tg3_flag_clear(tp, MDIOBUS_INITED);
1618 mdiobus_unregister(tp->mdio_bus);
1619 mdiobus_free(tp->mdio_bus);
1623 /* tp->lock is held. */
1624 static inline void tg3_generate_fw_event(struct tg3 *tp)
1628 val = tr32(GRC_RX_CPU_EVENT);
1629 val |= GRC_RX_CPU_DRIVER_EVENT;
1630 tw32_f(GRC_RX_CPU_EVENT, val);
1632 tp->last_event_jiffies = jiffies;
1635 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1637 /* tp->lock is held. */
1638 static void tg3_wait_for_event_ack(struct tg3 *tp)
1641 unsigned int delay_cnt;
1644 /* If enough time has passed, no wait is necessary. */
1645 time_remain = (long)(tp->last_event_jiffies + 1 +
1646 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1648 if (time_remain < 0)
1651 /* Check if we can shorten the wait time. */
1652 delay_cnt = jiffies_to_usecs(time_remain);
1653 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1654 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1655 delay_cnt = (delay_cnt >> 3) + 1;
1657 for (i = 0; i < delay_cnt; i++) {
1658 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1660 if (pci_channel_offline(tp->pdev))
1667 /* tp->lock is held. */
1668 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1673 if (!tg3_readphy(tp, MII_BMCR, ®))
1675 if (!tg3_readphy(tp, MII_BMSR, ®))
1676 val |= (reg & 0xffff);
1680 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1682 if (!tg3_readphy(tp, MII_LPA, ®))
1683 val |= (reg & 0xffff);
1687 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1688 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1690 if (!tg3_readphy(tp, MII_STAT1000, ®))
1691 val |= (reg & 0xffff);
1695 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1702 /* tp->lock is held. */
1703 static void tg3_ump_link_report(struct tg3 *tp)
1707 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1710 tg3_phy_gather_ump_data(tp, data);
1712 tg3_wait_for_event_ack(tp);
1714 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1715 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1716 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1717 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1718 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1719 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1721 tg3_generate_fw_event(tp);
1724 /* tp->lock is held. */
1725 static void tg3_stop_fw(struct tg3 *tp)
1727 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1728 /* Wait for RX cpu to ACK the previous event. */
1729 tg3_wait_for_event_ack(tp);
1731 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1733 tg3_generate_fw_event(tp);
1735 /* Wait for RX cpu to ACK this event. */
1736 tg3_wait_for_event_ack(tp);
1740 /* tp->lock is held. */
1741 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1743 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1744 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1746 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1748 case RESET_KIND_INIT:
1749 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1753 case RESET_KIND_SHUTDOWN:
1754 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1758 case RESET_KIND_SUSPEND:
1759 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1769 /* tp->lock is held. */
1770 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1772 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1774 case RESET_KIND_INIT:
1775 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1776 DRV_STATE_START_DONE);
1779 case RESET_KIND_SHUTDOWN:
1780 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1781 DRV_STATE_UNLOAD_DONE);
1790 /* tp->lock is held. */
1791 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1793 if (tg3_flag(tp, ENABLE_ASF)) {
1795 case RESET_KIND_INIT:
1796 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1800 case RESET_KIND_SHUTDOWN:
1801 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1805 case RESET_KIND_SUSPEND:
1806 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1816 static int tg3_poll_fw(struct tg3 *tp)
1821 if (tg3_flag(tp, NO_FWARE_REPORTED))
1824 if (tg3_flag(tp, IS_SSB_CORE)) {
1825 /* We don't use firmware. */
1829 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1830 /* Wait up to 20ms for init done. */
1831 for (i = 0; i < 200; i++) {
1832 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1834 if (pci_channel_offline(tp->pdev))
1842 /* Wait for firmware initialization to complete. */
1843 for (i = 0; i < 100000; i++) {
1844 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1845 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1847 if (pci_channel_offline(tp->pdev)) {
1848 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1849 tg3_flag_set(tp, NO_FWARE_REPORTED);
1850 netdev_info(tp->dev, "No firmware running\n");
1859 /* Chip might not be fitted with firmware. Some Sun onboard
1860 * parts are configured like that. So don't signal the timeout
1861 * of the above loop as an error, but do report the lack of
1862 * running firmware once.
1864 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1865 tg3_flag_set(tp, NO_FWARE_REPORTED);
1867 netdev_info(tp->dev, "No firmware running\n");
1870 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1871 /* The 57765 A0 needs a little more
1872 * time to do some important work.
1880 static void tg3_link_report(struct tg3 *tp)
1882 if (!netif_carrier_ok(tp->dev)) {
1883 netif_info(tp, link, tp->dev, "Link is down\n");
1884 tg3_ump_link_report(tp);
1885 } else if (netif_msg_link(tp)) {
1886 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1887 (tp->link_config.active_speed == SPEED_1000 ?
1889 (tp->link_config.active_speed == SPEED_100 ?
1891 (tp->link_config.active_duplex == DUPLEX_FULL ?
1894 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1895 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1897 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1900 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1901 netdev_info(tp->dev, "EEE is %s\n",
1902 tp->setlpicnt ? "enabled" : "disabled");
1904 tg3_ump_link_report(tp);
1907 tp->link_up = netif_carrier_ok(tp->dev);
1910 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1914 if (adv & ADVERTISE_PAUSE_CAP) {
1915 flowctrl |= FLOW_CTRL_RX;
1916 if (!(adv & ADVERTISE_PAUSE_ASYM))
1917 flowctrl |= FLOW_CTRL_TX;
1918 } else if (adv & ADVERTISE_PAUSE_ASYM)
1919 flowctrl |= FLOW_CTRL_TX;
1924 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1928 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1929 miireg = ADVERTISE_1000XPAUSE;
1930 else if (flow_ctrl & FLOW_CTRL_TX)
1931 miireg = ADVERTISE_1000XPSE_ASYM;
1932 else if (flow_ctrl & FLOW_CTRL_RX)
1933 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1940 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1944 if (adv & ADVERTISE_1000XPAUSE) {
1945 flowctrl |= FLOW_CTRL_RX;
1946 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1947 flowctrl |= FLOW_CTRL_TX;
1948 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1949 flowctrl |= FLOW_CTRL_TX;
1954 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1958 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1959 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1960 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1961 if (lcladv & ADVERTISE_1000XPAUSE)
1963 if (rmtadv & ADVERTISE_1000XPAUSE)
1970 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1974 u32 old_rx_mode = tp->rx_mode;
1975 u32 old_tx_mode = tp->tx_mode;
1977 if (tg3_flag(tp, USE_PHYLIB))
1978 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1980 autoneg = tp->link_config.autoneg;
1982 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1983 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1984 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1986 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1988 flowctrl = tp->link_config.flowctrl;
1990 tp->link_config.active_flowctrl = flowctrl;
1992 if (flowctrl & FLOW_CTRL_RX)
1993 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1995 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1997 if (old_rx_mode != tp->rx_mode)
1998 tw32_f(MAC_RX_MODE, tp->rx_mode);
2000 if (flowctrl & FLOW_CTRL_TX)
2001 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
2003 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2005 if (old_tx_mode != tp->tx_mode)
2006 tw32_f(MAC_TX_MODE, tp->tx_mode);
2009 static void tg3_adjust_link(struct net_device *dev)
2011 u8 oldflowctrl, linkmesg = 0;
2012 u32 mac_mode, lcl_adv, rmt_adv;
2013 struct tg3 *tp = netdev_priv(dev);
2014 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2016 spin_lock_bh(&tp->lock);
2018 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2019 MAC_MODE_HALF_DUPLEX);
2021 oldflowctrl = tp->link_config.active_flowctrl;
2027 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2028 mac_mode |= MAC_MODE_PORT_MODE_MII;
2029 else if (phydev->speed == SPEED_1000 ||
2030 tg3_asic_rev(tp) != ASIC_REV_5785)
2031 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2033 mac_mode |= MAC_MODE_PORT_MODE_MII;
2035 if (phydev->duplex == DUPLEX_HALF)
2036 mac_mode |= MAC_MODE_HALF_DUPLEX;
2038 lcl_adv = mii_advertise_flowctrl(
2039 tp->link_config.flowctrl);
2042 rmt_adv = LPA_PAUSE_CAP;
2043 if (phydev->asym_pause)
2044 rmt_adv |= LPA_PAUSE_ASYM;
2047 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2049 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2051 if (mac_mode != tp->mac_mode) {
2052 tp->mac_mode = mac_mode;
2053 tw32_f(MAC_MODE, tp->mac_mode);
2057 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2058 if (phydev->speed == SPEED_10)
2060 MAC_MI_STAT_10MBPS_MODE |
2061 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2063 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2066 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2067 tw32(MAC_TX_LENGTHS,
2068 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2069 (6 << TX_LENGTHS_IPG_SHIFT) |
2070 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2072 tw32(MAC_TX_LENGTHS,
2073 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2074 (6 << TX_LENGTHS_IPG_SHIFT) |
2075 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2077 if (phydev->link != tp->old_link ||
2078 phydev->speed != tp->link_config.active_speed ||
2079 phydev->duplex != tp->link_config.active_duplex ||
2080 oldflowctrl != tp->link_config.active_flowctrl)
2083 tp->old_link = phydev->link;
2084 tp->link_config.active_speed = phydev->speed;
2085 tp->link_config.active_duplex = phydev->duplex;
2087 spin_unlock_bh(&tp->lock);
2090 tg3_link_report(tp);
2093 static int tg3_phy_init(struct tg3 *tp)
2095 struct phy_device *phydev;
2097 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2100 /* Bring the PHY back to a known state. */
2103 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2105 /* Attach the MAC to the PHY. */
2106 phydev = phy_connect(tp->dev, phydev_name(phydev),
2107 tg3_adjust_link, phydev->interface);
2108 if (IS_ERR(phydev)) {
2109 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2110 return PTR_ERR(phydev);
2113 /* Mask with MAC supported features. */
2114 switch (phydev->interface) {
2115 case PHY_INTERFACE_MODE_GMII:
2116 case PHY_INTERFACE_MODE_RGMII:
2117 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2118 phydev->supported &= (PHY_GBIT_FEATURES |
2120 SUPPORTED_Asym_Pause);
2124 case PHY_INTERFACE_MODE_MII:
2125 phydev->supported &= (PHY_BASIC_FEATURES |
2127 SUPPORTED_Asym_Pause);
2130 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2134 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2136 phydev->advertising = phydev->supported;
2138 phy_attached_info(phydev);
2143 static void tg3_phy_start(struct tg3 *tp)
2145 struct phy_device *phydev;
2147 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2150 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2152 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2153 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2154 phydev->speed = tp->link_config.speed;
2155 phydev->duplex = tp->link_config.duplex;
2156 phydev->autoneg = tp->link_config.autoneg;
2157 phydev->advertising = tp->link_config.advertising;
2162 phy_start_aneg(phydev);
2165 static void tg3_phy_stop(struct tg3 *tp)
2167 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2170 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2173 static void tg3_phy_fini(struct tg3 *tp)
2175 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2176 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2177 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2181 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2186 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2189 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2190 /* Cannot do read-modify-write on 5401 */
2191 err = tg3_phy_auxctl_write(tp,
2192 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2193 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2198 err = tg3_phy_auxctl_read(tp,
2199 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2203 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2204 err = tg3_phy_auxctl_write(tp,
2205 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2211 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2215 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2218 tg3_writephy(tp, MII_TG3_FET_TEST,
2219 phytest | MII_TG3_FET_SHADOW_EN);
2220 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2222 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2224 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2225 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2227 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2231 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2235 if (!tg3_flag(tp, 5705_PLUS) ||
2236 (tg3_flag(tp, 5717_PLUS) &&
2237 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2240 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2241 tg3_phy_fet_toggle_apd(tp, enable);
2245 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2246 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2247 MII_TG3_MISC_SHDW_SCR5_SDTL |
2248 MII_TG3_MISC_SHDW_SCR5_C125OE;
2249 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2250 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2252 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2255 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2257 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2259 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2262 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2266 if (!tg3_flag(tp, 5705_PLUS) ||
2267 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2270 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2273 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2274 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2276 tg3_writephy(tp, MII_TG3_FET_TEST,
2277 ephy | MII_TG3_FET_SHADOW_EN);
2278 if (!tg3_readphy(tp, reg, &phy)) {
2280 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2282 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2283 tg3_writephy(tp, reg, phy);
2285 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2290 ret = tg3_phy_auxctl_read(tp,
2291 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2294 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2296 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2297 tg3_phy_auxctl_write(tp,
2298 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2303 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2308 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2311 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2313 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2314 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2317 static void tg3_phy_apply_otp(struct tg3 *tp)
2326 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2329 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2330 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2331 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2333 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2334 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2335 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2337 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2338 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2339 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2341 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2342 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2344 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2345 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2347 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2348 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2349 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2351 tg3_phy_toggle_auxctl_smdsp(tp, false);
2354 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2357 struct ethtool_eee *dest = &tp->eee;
2359 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2365 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2368 /* Pull eee_active */
2369 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2370 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2371 dest->eee_active = 1;
2373 dest->eee_active = 0;
2375 /* Pull lp advertised settings */
2376 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2378 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2380 /* Pull advertised and eee_enabled settings */
2381 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2383 dest->eee_enabled = !!val;
2384 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2386 /* Pull tx_lpi_enabled */
2387 val = tr32(TG3_CPMU_EEE_MODE);
2388 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2390 /* Pull lpi timer value */
2391 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2394 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2398 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2403 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2405 tp->link_config.active_duplex == DUPLEX_FULL &&
2406 (tp->link_config.active_speed == SPEED_100 ||
2407 tp->link_config.active_speed == SPEED_1000)) {
2410 if (tp->link_config.active_speed == SPEED_1000)
2411 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2413 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2415 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2417 tg3_eee_pull_config(tp, NULL);
2418 if (tp->eee.eee_active)
2422 if (!tp->setlpicnt) {
2423 if (current_link_up &&
2424 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2425 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2426 tg3_phy_toggle_auxctl_smdsp(tp, false);
2429 val = tr32(TG3_CPMU_EEE_MODE);
2430 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2434 static void tg3_phy_eee_enable(struct tg3 *tp)
2438 if (tp->link_config.active_speed == SPEED_1000 &&
2439 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2440 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2441 tg3_flag(tp, 57765_CLASS)) &&
2442 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2443 val = MII_TG3_DSP_TAP26_ALNOKO |
2444 MII_TG3_DSP_TAP26_RMRXSTO;
2445 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2446 tg3_phy_toggle_auxctl_smdsp(tp, false);
2449 val = tr32(TG3_CPMU_EEE_MODE);
2450 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2453 static int tg3_wait_macro_done(struct tg3 *tp)
2460 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2461 if ((tmp32 & 0x1000) == 0)
2471 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2473 static const u32 test_pat[4][6] = {
2474 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2475 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2476 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2477 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2481 for (chan = 0; chan < 4; chan++) {
2484 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2485 (chan * 0x2000) | 0x0200);
2486 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2488 for (i = 0; i < 6; i++)
2489 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2492 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2493 if (tg3_wait_macro_done(tp)) {
2498 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2499 (chan * 0x2000) | 0x0200);
2500 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2501 if (tg3_wait_macro_done(tp)) {
2506 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2507 if (tg3_wait_macro_done(tp)) {
2512 for (i = 0; i < 6; i += 2) {
2515 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2516 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2517 tg3_wait_macro_done(tp)) {
2523 if (low != test_pat[chan][i] ||
2524 high != test_pat[chan][i+1]) {
2525 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2526 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2527 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2537 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2541 for (chan = 0; chan < 4; chan++) {
2544 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2545 (chan * 0x2000) | 0x0200);
2546 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2547 for (i = 0; i < 6; i++)
2548 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2549 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2550 if (tg3_wait_macro_done(tp))
2557 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2559 u32 reg32, phy9_orig;
2560 int retries, do_phy_reset, err;
2566 err = tg3_bmcr_reset(tp);
2572 /* Disable transmitter and interrupt. */
2573 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
2577 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2579 /* Set full-duplex, 1000 mbps. */
2580 tg3_writephy(tp, MII_BMCR,
2581 BMCR_FULLDPLX | BMCR_SPEED1000);
2583 /* Set to master mode. */
2584 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2587 tg3_writephy(tp, MII_CTRL1000,
2588 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2590 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2594 /* Block the PHY control access. */
2595 tg3_phydsp_write(tp, 0x8005, 0x0800);
2597 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2600 } while (--retries);
2602 err = tg3_phy_reset_chanpat(tp);
2606 tg3_phydsp_write(tp, 0x8005, 0x0000);
2608 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2609 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2611 tg3_phy_toggle_auxctl_smdsp(tp, false);
2613 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2615 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32);
2620 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2625 static void tg3_carrier_off(struct tg3 *tp)
2627 netif_carrier_off(tp->dev);
2628 tp->link_up = false;
2631 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2633 if (tg3_flag(tp, ENABLE_ASF))
2634 netdev_warn(tp->dev,
2635 "Management side-band traffic will be interrupted during phy settings change\n");
2638 /* This will reset the tigon3 PHY if there is no valid
2639 * link unless the FORCE argument is non-zero.
2641 static int tg3_phy_reset(struct tg3 *tp)
2646 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2647 val = tr32(GRC_MISC_CFG);
2648 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2651 err = tg3_readphy(tp, MII_BMSR, &val);
2652 err |= tg3_readphy(tp, MII_BMSR, &val);
2656 if (netif_running(tp->dev) && tp->link_up) {
2657 netif_carrier_off(tp->dev);
2658 tg3_link_report(tp);
2661 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2662 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2663 tg3_asic_rev(tp) == ASIC_REV_5705) {
2664 err = tg3_phy_reset_5703_4_5(tp);
2671 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2672 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2673 cpmuctrl = tr32(TG3_CPMU_CTRL);
2674 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2676 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2679 err = tg3_bmcr_reset(tp);
2683 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2684 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2685 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2687 tw32(TG3_CPMU_CTRL, cpmuctrl);
2690 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2691 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2692 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2693 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2694 CPMU_LSPD_1000MB_MACCLK_12_5) {
2695 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2697 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2701 if (tg3_flag(tp, 5717_PLUS) &&
2702 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2705 tg3_phy_apply_otp(tp);
2707 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2708 tg3_phy_toggle_apd(tp, true);
2710 tg3_phy_toggle_apd(tp, false);
2713 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2714 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2715 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2716 tg3_phydsp_write(tp, 0x000a, 0x0323);
2717 tg3_phy_toggle_auxctl_smdsp(tp, false);
2720 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2721 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2722 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2725 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2726 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2727 tg3_phydsp_write(tp, 0x000a, 0x310b);
2728 tg3_phydsp_write(tp, 0x201f, 0x9506);
2729 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2730 tg3_phy_toggle_auxctl_smdsp(tp, false);
2732 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2733 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2734 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2735 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2736 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2737 tg3_writephy(tp, MII_TG3_TEST1,
2738 MII_TG3_TEST1_TRIM_EN | 0x4);
2740 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2742 tg3_phy_toggle_auxctl_smdsp(tp, false);
2746 /* Set Extended packet length bit (bit 14) on all chips that */
2747 /* support jumbo frames */
2748 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2749 /* Cannot do read-modify-write on 5401 */
2750 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2751 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2752 /* Set bit 14 with read-modify-write to preserve other bits */
2753 err = tg3_phy_auxctl_read(tp,
2754 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2756 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2757 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2760 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2761 * jumbo frames transmission.
2763 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2764 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2765 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2766 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2769 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2770 /* adjust output voltage */
2771 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2774 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2775 tg3_phydsp_write(tp, 0xffb, 0x4000);
2777 tg3_phy_toggle_automdix(tp, true);
2778 tg3_phy_set_wirespeed(tp);
2782 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2783 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2784 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2785 TG3_GPIO_MSG_NEED_VAUX)
2786 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2787 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2788 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2789 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2790 (TG3_GPIO_MSG_DRVR_PRES << 12))
2792 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2793 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2794 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2795 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2796 (TG3_GPIO_MSG_NEED_VAUX << 12))
2798 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2802 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2803 tg3_asic_rev(tp) == ASIC_REV_5719)
2804 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2806 status = tr32(TG3_CPMU_DRV_STATUS);
2808 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2809 status &= ~(TG3_GPIO_MSG_MASK << shift);
2810 status |= (newstat << shift);
2812 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2813 tg3_asic_rev(tp) == ASIC_REV_5719)
2814 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2816 tw32(TG3_CPMU_DRV_STATUS, status);
2818 return status >> TG3_APE_GPIO_MSG_SHIFT;
2821 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2823 if (!tg3_flag(tp, IS_NIC))
2826 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2827 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2828 tg3_asic_rev(tp) == ASIC_REV_5720) {
2829 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2832 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2834 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2835 TG3_GRC_LCLCTL_PWRSW_DELAY);
2837 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2839 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2840 TG3_GRC_LCLCTL_PWRSW_DELAY);
2846 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2850 if (!tg3_flag(tp, IS_NIC) ||
2851 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2852 tg3_asic_rev(tp) == ASIC_REV_5701)
2855 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2857 tw32_wait_f(GRC_LOCAL_CTRL,
2858 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2859 TG3_GRC_LCLCTL_PWRSW_DELAY);
2861 tw32_wait_f(GRC_LOCAL_CTRL,
2863 TG3_GRC_LCLCTL_PWRSW_DELAY);
2865 tw32_wait_f(GRC_LOCAL_CTRL,
2866 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2867 TG3_GRC_LCLCTL_PWRSW_DELAY);
2870 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2872 if (!tg3_flag(tp, IS_NIC))
2875 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2876 tg3_asic_rev(tp) == ASIC_REV_5701) {
2877 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2878 (GRC_LCLCTRL_GPIO_OE0 |
2879 GRC_LCLCTRL_GPIO_OE1 |
2880 GRC_LCLCTRL_GPIO_OE2 |
2881 GRC_LCLCTRL_GPIO_OUTPUT0 |
2882 GRC_LCLCTRL_GPIO_OUTPUT1),
2883 TG3_GRC_LCLCTL_PWRSW_DELAY);
2884 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2885 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2886 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2887 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2888 GRC_LCLCTRL_GPIO_OE1 |
2889 GRC_LCLCTRL_GPIO_OE2 |
2890 GRC_LCLCTRL_GPIO_OUTPUT0 |
2891 GRC_LCLCTRL_GPIO_OUTPUT1 |
2893 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2894 TG3_GRC_LCLCTL_PWRSW_DELAY);
2896 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2897 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2898 TG3_GRC_LCLCTL_PWRSW_DELAY);
2900 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2901 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2902 TG3_GRC_LCLCTL_PWRSW_DELAY);
2905 u32 grc_local_ctrl = 0;
2907 /* Workaround to prevent overdrawing Amps. */
2908 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2909 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2910 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2912 TG3_GRC_LCLCTL_PWRSW_DELAY);
2915 /* On 5753 and variants, GPIO2 cannot be used. */
2916 no_gpio2 = tp->nic_sram_data_cfg &
2917 NIC_SRAM_DATA_CFG_NO_GPIO2;
2919 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2920 GRC_LCLCTRL_GPIO_OE1 |
2921 GRC_LCLCTRL_GPIO_OE2 |
2922 GRC_LCLCTRL_GPIO_OUTPUT1 |
2923 GRC_LCLCTRL_GPIO_OUTPUT2;
2925 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2926 GRC_LCLCTRL_GPIO_OUTPUT2);
2928 tw32_wait_f(GRC_LOCAL_CTRL,
2929 tp->grc_local_ctrl | grc_local_ctrl,
2930 TG3_GRC_LCLCTL_PWRSW_DELAY);
2932 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2934 tw32_wait_f(GRC_LOCAL_CTRL,
2935 tp->grc_local_ctrl | grc_local_ctrl,
2936 TG3_GRC_LCLCTL_PWRSW_DELAY);
2939 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2940 tw32_wait_f(GRC_LOCAL_CTRL,
2941 tp->grc_local_ctrl | grc_local_ctrl,
2942 TG3_GRC_LCLCTL_PWRSW_DELAY);
2947 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2951 /* Serialize power state transitions */
2952 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2955 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2956 msg = TG3_GPIO_MSG_NEED_VAUX;
2958 msg = tg3_set_function_status(tp, msg);
2960 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2963 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2964 tg3_pwrsrc_switch_to_vaux(tp);
2966 tg3_pwrsrc_die_with_vmain(tp);
2969 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2972 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2974 bool need_vaux = false;
2976 /* The GPIOs do something completely different on 57765. */
2977 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2980 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2981 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2982 tg3_asic_rev(tp) == ASIC_REV_5720) {
2983 tg3_frob_aux_power_5717(tp, include_wol ?
2984 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2988 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2989 struct net_device *dev_peer;
2991 dev_peer = pci_get_drvdata(tp->pdev_peer);
2993 /* remove_one() may have been run on the peer. */
2995 struct tg3 *tp_peer = netdev_priv(dev_peer);
2997 if (tg3_flag(tp_peer, INIT_COMPLETE))
3000 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
3001 tg3_flag(tp_peer, ENABLE_ASF))
3006 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3007 tg3_flag(tp, ENABLE_ASF))
3011 tg3_pwrsrc_switch_to_vaux(tp);
3013 tg3_pwrsrc_die_with_vmain(tp);
3016 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3018 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3020 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3021 if (speed != SPEED_10)
3023 } else if (speed == SPEED_10)
3029 static bool tg3_phy_power_bug(struct tg3 *tp)
3031 switch (tg3_asic_rev(tp)) {
3036 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3045 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3054 static bool tg3_phy_led_bug(struct tg3 *tp)
3056 switch (tg3_asic_rev(tp)) {
3059 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3068 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3072 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3075 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3076 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3077 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3078 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3081 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3082 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3083 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3088 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3090 val = tr32(GRC_MISC_CFG);
3091 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3094 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3096 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3099 tg3_writephy(tp, MII_ADVERTISE, 0);
3100 tg3_writephy(tp, MII_BMCR,
3101 BMCR_ANENABLE | BMCR_ANRESTART);
3103 tg3_writephy(tp, MII_TG3_FET_TEST,
3104 phytest | MII_TG3_FET_SHADOW_EN);
3105 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3106 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3108 MII_TG3_FET_SHDW_AUXMODE4,
3111 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3114 } else if (do_low_power) {
3115 if (!tg3_phy_led_bug(tp))
3116 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3117 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3119 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3120 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3121 MII_TG3_AUXCTL_PCTL_VREG_11V;
3122 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3125 /* The PHY should not be powered down on some chips because
3128 if (tg3_phy_power_bug(tp))
3131 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3132 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3133 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3134 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3135 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3136 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3139 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3142 /* tp->lock is held. */
3143 static int tg3_nvram_lock(struct tg3 *tp)
3145 if (tg3_flag(tp, NVRAM)) {
3148 if (tp->nvram_lock_cnt == 0) {
3149 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3150 for (i = 0; i < 8000; i++) {
3151 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3156 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3160 tp->nvram_lock_cnt++;
3165 /* tp->lock is held. */
3166 static void tg3_nvram_unlock(struct tg3 *tp)
3168 if (tg3_flag(tp, NVRAM)) {
3169 if (tp->nvram_lock_cnt > 0)
3170 tp->nvram_lock_cnt--;
3171 if (tp->nvram_lock_cnt == 0)
3172 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3176 /* tp->lock is held. */
3177 static void tg3_enable_nvram_access(struct tg3 *tp)
3179 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3180 u32 nvaccess = tr32(NVRAM_ACCESS);
3182 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3186 /* tp->lock is held. */
3187 static void tg3_disable_nvram_access(struct tg3 *tp)
3189 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3190 u32 nvaccess = tr32(NVRAM_ACCESS);
3192 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3196 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3197 u32 offset, u32 *val)
3202 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3205 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3206 EEPROM_ADDR_DEVID_MASK |
3208 tw32(GRC_EEPROM_ADDR,
3210 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3211 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3212 EEPROM_ADDR_ADDR_MASK) |
3213 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3215 for (i = 0; i < 1000; i++) {
3216 tmp = tr32(GRC_EEPROM_ADDR);
3218 if (tmp & EEPROM_ADDR_COMPLETE)
3222 if (!(tmp & EEPROM_ADDR_COMPLETE))
3225 tmp = tr32(GRC_EEPROM_DATA);
3228 * The data will always be opposite the native endian
3229 * format. Perform a blind byteswap to compensate.
3236 #define NVRAM_CMD_TIMEOUT 10000
3238 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3242 tw32(NVRAM_CMD, nvram_cmd);
3243 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3244 usleep_range(10, 40);
3245 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3251 if (i == NVRAM_CMD_TIMEOUT)
3257 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3259 if (tg3_flag(tp, NVRAM) &&
3260 tg3_flag(tp, NVRAM_BUFFERED) &&
3261 tg3_flag(tp, FLASH) &&
3262 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3263 (tp->nvram_jedecnum == JEDEC_ATMEL))
3265 addr = ((addr / tp->nvram_pagesize) <<
3266 ATMEL_AT45DB0X1B_PAGE_POS) +
3267 (addr % tp->nvram_pagesize);
3272 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3274 if (tg3_flag(tp, NVRAM) &&
3275 tg3_flag(tp, NVRAM_BUFFERED) &&
3276 tg3_flag(tp, FLASH) &&
3277 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3278 (tp->nvram_jedecnum == JEDEC_ATMEL))
3280 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3281 tp->nvram_pagesize) +
3282 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3287 /* NOTE: Data read in from NVRAM is byteswapped according to
3288 * the byteswapping settings for all other register accesses.
3289 * tg3 devices are BE devices, so on a BE machine, the data
3290 * returned will be exactly as it is seen in NVRAM. On a LE
3291 * machine, the 32-bit value will be byteswapped.
3293 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3297 if (!tg3_flag(tp, NVRAM))
3298 return tg3_nvram_read_using_eeprom(tp, offset, val);
3300 offset = tg3_nvram_phys_addr(tp, offset);
3302 if (offset > NVRAM_ADDR_MSK)
3305 ret = tg3_nvram_lock(tp);
3309 tg3_enable_nvram_access(tp);
3311 tw32(NVRAM_ADDR, offset);
3312 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3313 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3316 *val = tr32(NVRAM_RDDATA);
3318 tg3_disable_nvram_access(tp);
3320 tg3_nvram_unlock(tp);
3325 /* Ensures NVRAM data is in bytestream format. */
3326 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3329 int res = tg3_nvram_read(tp, offset, &v);
3331 *val = cpu_to_be32(v);
3335 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3336 u32 offset, u32 len, u8 *buf)
3341 for (i = 0; i < len; i += 4) {
3347 memcpy(&data, buf + i, 4);
3350 * The SEEPROM interface expects the data to always be opposite
3351 * the native endian format. We accomplish this by reversing
3352 * all the operations that would have been performed on the
3353 * data from a call to tg3_nvram_read_be32().
3355 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3357 val = tr32(GRC_EEPROM_ADDR);
3358 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3360 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3362 tw32(GRC_EEPROM_ADDR, val |
3363 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3364 (addr & EEPROM_ADDR_ADDR_MASK) |
3368 for (j = 0; j < 1000; j++) {
3369 val = tr32(GRC_EEPROM_ADDR);
3371 if (val & EEPROM_ADDR_COMPLETE)
3375 if (!(val & EEPROM_ADDR_COMPLETE)) {
3384 /* offset and length are dword aligned */
3385 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3389 u32 pagesize = tp->nvram_pagesize;
3390 u32 pagemask = pagesize - 1;
3394 tmp = kmalloc(pagesize, GFP_KERNEL);
3400 u32 phy_addr, page_off, size;
3402 phy_addr = offset & ~pagemask;
3404 for (j = 0; j < pagesize; j += 4) {
3405 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3406 (__be32 *) (tmp + j));
3413 page_off = offset & pagemask;
3420 memcpy(tmp + page_off, buf, size);
3422 offset = offset + (pagesize - page_off);
3424 tg3_enable_nvram_access(tp);
3427 * Before we can erase the flash page, we need
3428 * to issue a special "write enable" command.
3430 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3432 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3435 /* Erase the target page */
3436 tw32(NVRAM_ADDR, phy_addr);
3438 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3439 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3441 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3444 /* Issue another write enable to start the write. */
3445 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3447 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3450 for (j = 0; j < pagesize; j += 4) {
3453 data = *((__be32 *) (tmp + j));
3455 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3457 tw32(NVRAM_ADDR, phy_addr + j);
3459 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3463 nvram_cmd |= NVRAM_CMD_FIRST;
3464 else if (j == (pagesize - 4))
3465 nvram_cmd |= NVRAM_CMD_LAST;
3467 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3475 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3476 tg3_nvram_exec_cmd(tp, nvram_cmd);
3483 /* offset and length are dword aligned */
3484 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3489 for (i = 0; i < len; i += 4, offset += 4) {
3490 u32 page_off, phy_addr, nvram_cmd;
3493 memcpy(&data, buf + i, 4);
3494 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3496 page_off = offset % tp->nvram_pagesize;
3498 phy_addr = tg3_nvram_phys_addr(tp, offset);
3500 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3502 if (page_off == 0 || i == 0)
3503 nvram_cmd |= NVRAM_CMD_FIRST;
3504 if (page_off == (tp->nvram_pagesize - 4))
3505 nvram_cmd |= NVRAM_CMD_LAST;
3508 nvram_cmd |= NVRAM_CMD_LAST;
3510 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3511 !tg3_flag(tp, FLASH) ||
3512 !tg3_flag(tp, 57765_PLUS))
3513 tw32(NVRAM_ADDR, phy_addr);
3515 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3516 !tg3_flag(tp, 5755_PLUS) &&
3517 (tp->nvram_jedecnum == JEDEC_ST) &&
3518 (nvram_cmd & NVRAM_CMD_FIRST)) {
3521 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3522 ret = tg3_nvram_exec_cmd(tp, cmd);
3526 if (!tg3_flag(tp, FLASH)) {
3527 /* We always do complete word writes to eeprom. */
3528 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3531 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3538 /* offset and length are dword aligned */
3539 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3543 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3544 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3545 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3549 if (!tg3_flag(tp, NVRAM)) {
3550 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3554 ret = tg3_nvram_lock(tp);
3558 tg3_enable_nvram_access(tp);
3559 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3560 tw32(NVRAM_WRITE1, 0x406);
3562 grc_mode = tr32(GRC_MODE);
3563 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3565 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3566 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3569 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3573 grc_mode = tr32(GRC_MODE);
3574 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3576 tg3_disable_nvram_access(tp);
3577 tg3_nvram_unlock(tp);
3580 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3581 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3588 #define RX_CPU_SCRATCH_BASE 0x30000
3589 #define RX_CPU_SCRATCH_SIZE 0x04000
3590 #define TX_CPU_SCRATCH_BASE 0x34000
3591 #define TX_CPU_SCRATCH_SIZE 0x04000
3593 /* tp->lock is held. */
3594 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3597 const int iters = 10000;
3599 for (i = 0; i < iters; i++) {
3600 tw32(cpu_base + CPU_STATE, 0xffffffff);
3601 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3602 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3604 if (pci_channel_offline(tp->pdev))
3608 return (i == iters) ? -EBUSY : 0;
3611 /* tp->lock is held. */
3612 static int tg3_rxcpu_pause(struct tg3 *tp)
3614 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3616 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3617 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3623 /* tp->lock is held. */
3624 static int tg3_txcpu_pause(struct tg3 *tp)
3626 return tg3_pause_cpu(tp, TX_CPU_BASE);
3629 /* tp->lock is held. */
3630 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3632 tw32(cpu_base + CPU_STATE, 0xffffffff);
3633 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3636 /* tp->lock is held. */
3637 static void tg3_rxcpu_resume(struct tg3 *tp)
3639 tg3_resume_cpu(tp, RX_CPU_BASE);
3642 /* tp->lock is held. */
3643 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3647 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3649 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3650 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3652 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3655 if (cpu_base == RX_CPU_BASE) {
3656 rc = tg3_rxcpu_pause(tp);
3659 * There is only an Rx CPU for the 5750 derivative in the
3662 if (tg3_flag(tp, IS_SSB_CORE))
3665 rc = tg3_txcpu_pause(tp);
3669 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3670 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3674 /* Clear firmware's nvram arbitration. */
3675 if (tg3_flag(tp, NVRAM))
3676 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3680 static int tg3_fw_data_len(struct tg3 *tp,
3681 const struct tg3_firmware_hdr *fw_hdr)
3685 /* Non fragmented firmware have one firmware header followed by a
3686 * contiguous chunk of data to be written. The length field in that
3687 * header is not the length of data to be written but the complete
3688 * length of the bss. The data length is determined based on
3689 * tp->fw->size minus headers.
3691 * Fragmented firmware have a main header followed by multiple
3692 * fragments. Each fragment is identical to non fragmented firmware
3693 * with a firmware header followed by a contiguous chunk of data. In
3694 * the main header, the length field is unused and set to 0xffffffff.
3695 * In each fragment header the length is the entire size of that
3696 * fragment i.e. fragment data + header length. Data length is
3697 * therefore length field in the header minus TG3_FW_HDR_LEN.
3699 if (tp->fw_len == 0xffffffff)
3700 fw_len = be32_to_cpu(fw_hdr->len);
3702 fw_len = tp->fw->size;
3704 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3707 /* tp->lock is held. */
3708 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3709 u32 cpu_scratch_base, int cpu_scratch_size,
3710 const struct tg3_firmware_hdr *fw_hdr)
3713 void (*write_op)(struct tg3 *, u32, u32);
3714 int total_len = tp->fw->size;
3716 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3718 "%s: Trying to load TX cpu firmware which is 5705\n",
3723 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3724 write_op = tg3_write_mem;
3726 write_op = tg3_write_indirect_reg32;
3728 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3729 /* It is possible that bootcode is still loading at this point.
3730 * Get the nvram lock first before halting the cpu.
3732 int lock_err = tg3_nvram_lock(tp);
3733 err = tg3_halt_cpu(tp, cpu_base);
3735 tg3_nvram_unlock(tp);
3739 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3740 write_op(tp, cpu_scratch_base + i, 0);
3741 tw32(cpu_base + CPU_STATE, 0xffffffff);
3742 tw32(cpu_base + CPU_MODE,
3743 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3745 /* Subtract additional main header for fragmented firmware and
3746 * advance to the first fragment
3748 total_len -= TG3_FW_HDR_LEN;
3753 u32 *fw_data = (u32 *)(fw_hdr + 1);
3754 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3755 write_op(tp, cpu_scratch_base +
3756 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3758 be32_to_cpu(fw_data[i]));
3760 total_len -= be32_to_cpu(fw_hdr->len);
3762 /* Advance to next fragment */
3763 fw_hdr = (struct tg3_firmware_hdr *)
3764 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3765 } while (total_len > 0);
3773 /* tp->lock is held. */
3774 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3777 const int iters = 5;
3779 tw32(cpu_base + CPU_STATE, 0xffffffff);
3780 tw32_f(cpu_base + CPU_PC, pc);
3782 for (i = 0; i < iters; i++) {
3783 if (tr32(cpu_base + CPU_PC) == pc)
3785 tw32(cpu_base + CPU_STATE, 0xffffffff);
3786 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3787 tw32_f(cpu_base + CPU_PC, pc);
3791 return (i == iters) ? -EBUSY : 0;
3794 /* tp->lock is held. */
3795 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3797 const struct tg3_firmware_hdr *fw_hdr;
3800 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3802 /* Firmware blob starts with version numbers, followed by
3803 start address and length. We are setting complete length.
3804 length = end_address_of_bss - start_address_of_text.
3805 Remainder is the blob to be loaded contiguously
3806 from start address. */
3808 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3809 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3814 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3815 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3820 /* Now startup only the RX cpu. */
3821 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3822 be32_to_cpu(fw_hdr->base_addr));
3824 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3825 "should be %08x\n", __func__,
3826 tr32(RX_CPU_BASE + CPU_PC),
3827 be32_to_cpu(fw_hdr->base_addr));
3831 tg3_rxcpu_resume(tp);
3836 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3838 const int iters = 1000;
3842 /* Wait for boot code to complete initialization and enter service
3843 * loop. It is then safe to download service patches
3845 for (i = 0; i < iters; i++) {
3846 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3853 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3857 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3859 netdev_warn(tp->dev,
3860 "Other patches exist. Not downloading EEE patch\n");
3867 /* tp->lock is held. */
3868 static void tg3_load_57766_firmware(struct tg3 *tp)
3870 struct tg3_firmware_hdr *fw_hdr;
3872 if (!tg3_flag(tp, NO_NVRAM))
3875 if (tg3_validate_rxcpu_state(tp))
3881 /* This firmware blob has a different format than older firmware
3882 * releases as given below. The main difference is we have fragmented
3883 * data to be written to non-contiguous locations.
3885 * In the beginning we have a firmware header identical to other
3886 * firmware which consists of version, base addr and length. The length
3887 * here is unused and set to 0xffffffff.
3889 * This is followed by a series of firmware fragments which are
3890 * individually identical to previous firmware. i.e. they have the
3891 * firmware header and followed by data for that fragment. The version
3892 * field of the individual fragment header is unused.
3895 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3896 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3899 if (tg3_rxcpu_pause(tp))
3902 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3903 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3905 tg3_rxcpu_resume(tp);
3908 /* tp->lock is held. */
3909 static int tg3_load_tso_firmware(struct tg3 *tp)
3911 const struct tg3_firmware_hdr *fw_hdr;
3912 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3915 if (!tg3_flag(tp, FW_TSO))
3918 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3920 /* Firmware blob starts with version numbers, followed by
3921 start address and length. We are setting complete length.
3922 length = end_address_of_bss - start_address_of_text.
3923 Remainder is the blob to be loaded contiguously
3924 from start address. */
3926 cpu_scratch_size = tp->fw_len;
3928 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3929 cpu_base = RX_CPU_BASE;
3930 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3932 cpu_base = TX_CPU_BASE;
3933 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3934 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3937 err = tg3_load_firmware_cpu(tp, cpu_base,
3938 cpu_scratch_base, cpu_scratch_size,
3943 /* Now startup the cpu. */
3944 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3945 be32_to_cpu(fw_hdr->base_addr));
3948 "%s fails to set CPU PC, is %08x should be %08x\n",
3949 __func__, tr32(cpu_base + CPU_PC),
3950 be32_to_cpu(fw_hdr->base_addr));
3954 tg3_resume_cpu(tp, cpu_base);
3958 /* tp->lock is held. */
3959 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3961 u32 addr_high, addr_low;
3963 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3964 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3965 (mac_addr[4] << 8) | mac_addr[5]);
3968 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3969 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3972 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3973 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3977 /* tp->lock is held. */
3978 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3983 for (i = 0; i < 4; i++) {
3984 if (i == 1 && skip_mac_1)
3986 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3989 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3990 tg3_asic_rev(tp) == ASIC_REV_5704) {
3991 for (i = 4; i < 16; i++)
3992 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3995 addr_high = (tp->dev->dev_addr[0] +
3996 tp->dev->dev_addr[1] +
3997 tp->dev->dev_addr[2] +
3998 tp->dev->dev_addr[3] +
3999 tp->dev->dev_addr[4] +
4000 tp->dev->dev_addr[5]) &
4001 TX_BACKOFF_SEED_MASK;
4002 tw32(MAC_TX_BACKOFF_SEED, addr_high);
4005 static void tg3_enable_register_access(struct tg3 *tp)
4008 * Make sure register accesses (indirect or otherwise) will function
4011 pci_write_config_dword(tp->pdev,
4012 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4015 static int tg3_power_up(struct tg3 *tp)
4019 tg3_enable_register_access(tp);
4021 err = pci_set_power_state(tp->pdev, PCI_D0);
4023 /* Switch out of Vaux if it is a NIC */
4024 tg3_pwrsrc_switch_to_vmain(tp);
4026 netdev_err(tp->dev, "Transition to D0 failed\n");
4032 static int tg3_setup_phy(struct tg3 *, bool);
4034 static int tg3_power_down_prepare(struct tg3 *tp)
4037 bool device_should_wake, do_low_power;
4039 tg3_enable_register_access(tp);
4041 /* Restore the CLKREQ setting. */
4042 if (tg3_flag(tp, CLKREQ_BUG))
4043 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4044 PCI_EXP_LNKCTL_CLKREQ_EN);
4046 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4047 tw32(TG3PCI_MISC_HOST_CTRL,
4048 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4050 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4051 tg3_flag(tp, WOL_ENABLE);
4053 if (tg3_flag(tp, USE_PHYLIB)) {
4054 do_low_power = false;
4055 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4056 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4057 struct phy_device *phydev;
4058 u32 phyid, advertising;
4060 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4062 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4064 tp->link_config.speed = phydev->speed;
4065 tp->link_config.duplex = phydev->duplex;
4066 tp->link_config.autoneg = phydev->autoneg;
4067 tp->link_config.advertising = phydev->advertising;
4069 advertising = ADVERTISED_TP |
4071 ADVERTISED_Autoneg |
4072 ADVERTISED_10baseT_Half;
4074 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4075 if (tg3_flag(tp, WOL_SPEED_100MB))
4077 ADVERTISED_100baseT_Half |
4078 ADVERTISED_100baseT_Full |
4079 ADVERTISED_10baseT_Full;
4081 advertising |= ADVERTISED_10baseT_Full;
4084 phydev->advertising = advertising;
4086 phy_start_aneg(phydev);
4088 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4089 if (phyid != PHY_ID_BCMAC131) {
4090 phyid &= PHY_BCM_OUI_MASK;
4091 if (phyid == PHY_BCM_OUI_1 ||
4092 phyid == PHY_BCM_OUI_2 ||
4093 phyid == PHY_BCM_OUI_3)
4094 do_low_power = true;
4098 do_low_power = true;
4100 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4101 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4103 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4104 tg3_setup_phy(tp, false);
4107 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4110 val = tr32(GRC_VCPU_EXT_CTRL);
4111 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4112 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4116 for (i = 0; i < 200; i++) {
4117 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4118 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4123 if (tg3_flag(tp, WOL_CAP))
4124 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4125 WOL_DRV_STATE_SHUTDOWN |
4129 if (device_should_wake) {
4132 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4134 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4135 tg3_phy_auxctl_write(tp,
4136 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4137 MII_TG3_AUXCTL_PCTL_WOL_EN |
4138 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4139 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4143 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4144 mac_mode = MAC_MODE_PORT_MODE_GMII;
4145 else if (tp->phy_flags &
4146 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4147 if (tp->link_config.active_speed == SPEED_1000)
4148 mac_mode = MAC_MODE_PORT_MODE_GMII;
4150 mac_mode = MAC_MODE_PORT_MODE_MII;
4152 mac_mode = MAC_MODE_PORT_MODE_MII;
4154 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4155 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4156 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4157 SPEED_100 : SPEED_10;
4158 if (tg3_5700_link_polarity(tp, speed))
4159 mac_mode |= MAC_MODE_LINK_POLARITY;
4161 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4164 mac_mode = MAC_MODE_PORT_MODE_TBI;
4167 if (!tg3_flag(tp, 5750_PLUS))
4168 tw32(MAC_LED_CTRL, tp->led_ctrl);
4170 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4171 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4172 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4173 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4175 if (tg3_flag(tp, ENABLE_APE))
4176 mac_mode |= MAC_MODE_APE_TX_EN |
4177 MAC_MODE_APE_RX_EN |
4178 MAC_MODE_TDE_ENABLE;
4180 tw32_f(MAC_MODE, mac_mode);
4183 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4187 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4188 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4189 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4192 base_val = tp->pci_clock_ctrl;
4193 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4194 CLOCK_CTRL_TXCLK_DISABLE);
4196 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4197 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4198 } else if (tg3_flag(tp, 5780_CLASS) ||
4199 tg3_flag(tp, CPMU_PRESENT) ||
4200 tg3_asic_rev(tp) == ASIC_REV_5906) {
4202 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4203 u32 newbits1, newbits2;
4205 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4206 tg3_asic_rev(tp) == ASIC_REV_5701) {
4207 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4208 CLOCK_CTRL_TXCLK_DISABLE |
4210 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4211 } else if (tg3_flag(tp, 5705_PLUS)) {
4212 newbits1 = CLOCK_CTRL_625_CORE;
4213 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4215 newbits1 = CLOCK_CTRL_ALTCLK;
4216 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4219 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4222 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4225 if (!tg3_flag(tp, 5705_PLUS)) {
4228 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4229 tg3_asic_rev(tp) == ASIC_REV_5701) {
4230 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4231 CLOCK_CTRL_TXCLK_DISABLE |
4232 CLOCK_CTRL_44MHZ_CORE);
4234 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4237 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4238 tp->pci_clock_ctrl | newbits3, 40);
4242 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4243 tg3_power_down_phy(tp, do_low_power);
4245 tg3_frob_aux_power(tp, true);
4247 /* Workaround for unstable PLL clock */
4248 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4249 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4250 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4251 u32 val = tr32(0x7d00);
4253 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4255 if (!tg3_flag(tp, ENABLE_ASF)) {
4258 err = tg3_nvram_lock(tp);
4259 tg3_halt_cpu(tp, RX_CPU_BASE);
4261 tg3_nvram_unlock(tp);
4265 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4267 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4272 static void tg3_power_down(struct tg3 *tp)
4274 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4275 pci_set_power_state(tp->pdev, PCI_D3hot);
4278 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4280 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4281 case MII_TG3_AUX_STAT_10HALF:
4283 *duplex = DUPLEX_HALF;
4286 case MII_TG3_AUX_STAT_10FULL:
4288 *duplex = DUPLEX_FULL;
4291 case MII_TG3_AUX_STAT_100HALF:
4293 *duplex = DUPLEX_HALF;
4296 case MII_TG3_AUX_STAT_100FULL:
4298 *duplex = DUPLEX_FULL;
4301 case MII_TG3_AUX_STAT_1000HALF:
4302 *speed = SPEED_1000;
4303 *duplex = DUPLEX_HALF;
4306 case MII_TG3_AUX_STAT_1000FULL:
4307 *speed = SPEED_1000;
4308 *duplex = DUPLEX_FULL;
4312 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4313 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4315 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4319 *speed = SPEED_UNKNOWN;
4320 *duplex = DUPLEX_UNKNOWN;
4325 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4330 new_adv = ADVERTISE_CSMA;
4331 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4332 new_adv |= mii_advertise_flowctrl(flowctrl);
4334 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4338 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4339 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4341 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4342 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4343 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4345 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4350 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4353 tw32(TG3_CPMU_EEE_MODE,
4354 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4356 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4361 /* Advertise 100-BaseTX EEE ability */
4362 if (advertise & ADVERTISED_100baseT_Full)
4363 val |= MDIO_AN_EEE_ADV_100TX;
4364 /* Advertise 1000-BaseT EEE ability */
4365 if (advertise & ADVERTISED_1000baseT_Full)
4366 val |= MDIO_AN_EEE_ADV_1000T;
4368 if (!tp->eee.eee_enabled) {
4370 tp->eee.advertised = 0;
4372 tp->eee.advertised = advertise &
4373 (ADVERTISED_100baseT_Full |
4374 ADVERTISED_1000baseT_Full);
4377 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4381 switch (tg3_asic_rev(tp)) {
4383 case ASIC_REV_57765:
4384 case ASIC_REV_57766:
4386 /* If we advertised any eee advertisements above... */
4388 val = MII_TG3_DSP_TAP26_ALNOKO |
4389 MII_TG3_DSP_TAP26_RMRXSTO |
4390 MII_TG3_DSP_TAP26_OPCSINPT;
4391 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4395 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4396 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4397 MII_TG3_DSP_CH34TP2_HIBW01);
4400 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4409 static void tg3_phy_copper_begin(struct tg3 *tp)
4411 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4412 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4415 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4416 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4417 adv = ADVERTISED_10baseT_Half |
4418 ADVERTISED_10baseT_Full;
4419 if (tg3_flag(tp, WOL_SPEED_100MB))
4420 adv |= ADVERTISED_100baseT_Half |
4421 ADVERTISED_100baseT_Full;
4422 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4423 if (!(tp->phy_flags &
4424 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4425 adv |= ADVERTISED_1000baseT_Half;
4426 adv |= ADVERTISED_1000baseT_Full;
4429 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4431 adv = tp->link_config.advertising;
4432 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4433 adv &= ~(ADVERTISED_1000baseT_Half |
4434 ADVERTISED_1000baseT_Full);
4436 fc = tp->link_config.flowctrl;
4439 tg3_phy_autoneg_cfg(tp, adv, fc);
4441 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4442 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4443 /* Normally during power down we want to autonegotiate
4444 * the lowest possible speed for WOL. However, to avoid
4445 * link flap, we leave it untouched.
4450 tg3_writephy(tp, MII_BMCR,
4451 BMCR_ANENABLE | BMCR_ANRESTART);
4454 u32 bmcr, orig_bmcr;
4456 tp->link_config.active_speed = tp->link_config.speed;
4457 tp->link_config.active_duplex = tp->link_config.duplex;
4459 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4460 /* With autoneg disabled, 5715 only links up when the
4461 * advertisement register has the configured speed
4464 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4468 switch (tp->link_config.speed) {
4474 bmcr |= BMCR_SPEED100;
4478 bmcr |= BMCR_SPEED1000;
4482 if (tp->link_config.duplex == DUPLEX_FULL)
4483 bmcr |= BMCR_FULLDPLX;
4485 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4486 (bmcr != orig_bmcr)) {
4487 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4488 for (i = 0; i < 1500; i++) {
4492 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4493 tg3_readphy(tp, MII_BMSR, &tmp))
4495 if (!(tmp & BMSR_LSTATUS)) {
4500 tg3_writephy(tp, MII_BMCR, bmcr);
4506 static int tg3_phy_pull_config(struct tg3 *tp)
4511 err = tg3_readphy(tp, MII_BMCR, &val);
4515 if (!(val & BMCR_ANENABLE)) {
4516 tp->link_config.autoneg = AUTONEG_DISABLE;
4517 tp->link_config.advertising = 0;
4518 tg3_flag_clear(tp, PAUSE_AUTONEG);
4522 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4524 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4527 tp->link_config.speed = SPEED_10;
4530 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4533 tp->link_config.speed = SPEED_100;
4535 case BMCR_SPEED1000:
4536 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4537 tp->link_config.speed = SPEED_1000;
4545 if (val & BMCR_FULLDPLX)
4546 tp->link_config.duplex = DUPLEX_FULL;
4548 tp->link_config.duplex = DUPLEX_HALF;
4550 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4556 tp->link_config.autoneg = AUTONEG_ENABLE;
4557 tp->link_config.advertising = ADVERTISED_Autoneg;
4558 tg3_flag_set(tp, PAUSE_AUTONEG);
4560 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4563 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4567 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4568 tp->link_config.advertising |= adv | ADVERTISED_TP;
4570 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4572 tp->link_config.advertising |= ADVERTISED_FIBRE;
4575 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4578 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4579 err = tg3_readphy(tp, MII_CTRL1000, &val);
4583 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4585 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4589 adv = tg3_decode_flowctrl_1000X(val);
4590 tp->link_config.flowctrl = adv;
4592 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4593 adv = mii_adv_to_ethtool_adv_x(val);
4596 tp->link_config.advertising |= adv;
4603 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4607 /* Turn off tap power management. */
4608 /* Set Extended packet length bit */
4609 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4611 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4612 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4613 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4614 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4615 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4622 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4624 struct ethtool_eee eee;
4626 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4629 tg3_eee_pull_config(tp, &eee);
4631 if (tp->eee.eee_enabled) {
4632 if (tp->eee.advertised != eee.advertised ||
4633 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4634 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4637 /* EEE is disabled but we're advertising */
4645 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4647 u32 advmsk, tgtadv, advertising;
4649 advertising = tp->link_config.advertising;
4650 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4652 advmsk = ADVERTISE_ALL;
4653 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4654 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4655 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4658 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4661 if ((*lcladv & advmsk) != tgtadv)
4664 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4667 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4669 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4673 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4674 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4675 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4676 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4677 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4679 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4682 if (tg3_ctrl != tgtadv)
4689 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4693 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4696 if (tg3_readphy(tp, MII_STAT1000, &val))
4699 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4702 if (tg3_readphy(tp, MII_LPA, rmtadv))
4705 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4706 tp->link_config.rmt_adv = lpeth;
4711 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4713 if (curr_link_up != tp->link_up) {
4715 netif_carrier_on(tp->dev);
4717 netif_carrier_off(tp->dev);
4718 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4719 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4722 tg3_link_report(tp);
4729 static void tg3_clear_mac_status(struct tg3 *tp)
4734 MAC_STATUS_SYNC_CHANGED |
4735 MAC_STATUS_CFG_CHANGED |
4736 MAC_STATUS_MI_COMPLETION |
4737 MAC_STATUS_LNKSTATE_CHANGED);
4741 static void tg3_setup_eee(struct tg3 *tp)
4745 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4746 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4747 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4748 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4750 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4752 tw32_f(TG3_CPMU_EEE_CTRL,
4753 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4755 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4756 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4757 TG3_CPMU_EEEMD_LPI_IN_RX |
4758 TG3_CPMU_EEEMD_EEE_ENABLE;
4760 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4761 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4763 if (tg3_flag(tp, ENABLE_APE))
4764 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4766 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4768 tw32_f(TG3_CPMU_EEE_DBTMR1,
4769 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4770 (tp->eee.tx_lpi_timer & 0xffff));
4772 tw32_f(TG3_CPMU_EEE_DBTMR2,
4773 TG3_CPMU_DBTMR2_APE_TX_2047US |
4774 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4777 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4779 bool current_link_up;
4781 u32 lcl_adv, rmt_adv;
4786 tg3_clear_mac_status(tp);
4788 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4790 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4794 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4796 /* Some third-party PHYs need to be reset on link going
4799 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4800 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4801 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4803 tg3_readphy(tp, MII_BMSR, &bmsr);
4804 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4805 !(bmsr & BMSR_LSTATUS))
4811 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4812 tg3_readphy(tp, MII_BMSR, &bmsr);
4813 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4814 !tg3_flag(tp, INIT_COMPLETE))
4817 if (!(bmsr & BMSR_LSTATUS)) {
4818 err = tg3_init_5401phy_dsp(tp);
4822 tg3_readphy(tp, MII_BMSR, &bmsr);
4823 for (i = 0; i < 1000; i++) {
4825 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4826 (bmsr & BMSR_LSTATUS)) {
4832 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4833 TG3_PHY_REV_BCM5401_B0 &&
4834 !(bmsr & BMSR_LSTATUS) &&
4835 tp->link_config.active_speed == SPEED_1000) {
4836 err = tg3_phy_reset(tp);
4838 err = tg3_init_5401phy_dsp(tp);
4843 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4844 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4845 /* 5701 {A0,B0} CRC bug workaround */
4846 tg3_writephy(tp, 0x15, 0x0a75);
4847 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4848 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4849 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4852 /* Clear pending interrupts... */
4853 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4854 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4856 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4857 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4858 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4859 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4861 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4862 tg3_asic_rev(tp) == ASIC_REV_5701) {
4863 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4864 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4865 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4867 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4870 current_link_up = false;
4871 current_speed = SPEED_UNKNOWN;
4872 current_duplex = DUPLEX_UNKNOWN;
4873 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4874 tp->link_config.rmt_adv = 0;
4876 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4877 err = tg3_phy_auxctl_read(tp,
4878 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4880 if (!err && !(val & (1 << 10))) {
4881 tg3_phy_auxctl_write(tp,
4882 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4889 for (i = 0; i < 100; i++) {
4890 tg3_readphy(tp, MII_BMSR, &bmsr);
4891 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4892 (bmsr & BMSR_LSTATUS))
4897 if (bmsr & BMSR_LSTATUS) {
4900 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4901 for (i = 0; i < 2000; i++) {
4903 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4908 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4913 for (i = 0; i < 200; i++) {
4914 tg3_readphy(tp, MII_BMCR, &bmcr);
4915 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4917 if (bmcr && bmcr != 0x7fff)
4925 tp->link_config.active_speed = current_speed;
4926 tp->link_config.active_duplex = current_duplex;
4928 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4929 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4931 if ((bmcr & BMCR_ANENABLE) &&
4933 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4934 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4935 current_link_up = true;
4937 /* EEE settings changes take effect only after a phy
4938 * reset. If we have skipped a reset due to Link Flap
4939 * Avoidance being enabled, do it now.
4941 if (!eee_config_ok &&
4942 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4948 if (!(bmcr & BMCR_ANENABLE) &&
4949 tp->link_config.speed == current_speed &&
4950 tp->link_config.duplex == current_duplex) {
4951 current_link_up = true;
4955 if (current_link_up &&
4956 tp->link_config.active_duplex == DUPLEX_FULL) {
4959 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4960 reg = MII_TG3_FET_GEN_STAT;
4961 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4963 reg = MII_TG3_EXT_STAT;
4964 bit = MII_TG3_EXT_STAT_MDIX;
4967 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4968 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4970 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4975 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4976 tg3_phy_copper_begin(tp);
4978 if (tg3_flag(tp, ROBOSWITCH)) {
4979 current_link_up = true;
4980 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4981 current_speed = SPEED_1000;
4982 current_duplex = DUPLEX_FULL;
4983 tp->link_config.active_speed = current_speed;
4984 tp->link_config.active_duplex = current_duplex;
4987 tg3_readphy(tp, MII_BMSR, &bmsr);
4988 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4989 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4990 current_link_up = true;
4993 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4994 if (current_link_up) {
4995 if (tp->link_config.active_speed == SPEED_100 ||
4996 tp->link_config.active_speed == SPEED_10)
4997 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4999 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5000 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
5001 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5003 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5005 /* In order for the 5750 core in BCM4785 chip to work properly
5006 * in RGMII mode, the Led Control Register must be set up.
5008 if (tg3_flag(tp, RGMII_MODE)) {
5009 u32 led_ctrl = tr32(MAC_LED_CTRL);
5010 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5012 if (tp->link_config.active_speed == SPEED_10)
5013 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5014 else if (tp->link_config.active_speed == SPEED_100)
5015 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5016 LED_CTRL_100MBPS_ON);
5017 else if (tp->link_config.active_speed == SPEED_1000)
5018 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5019 LED_CTRL_1000MBPS_ON);
5021 tw32(MAC_LED_CTRL, led_ctrl);
5025 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5026 if (tp->link_config.active_duplex == DUPLEX_HALF)
5027 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5029 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5030 if (current_link_up &&
5031 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5032 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5034 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5037 /* ??? Without this setting Netgear GA302T PHY does not
5038 * ??? send/receive packets...
5040 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5041 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5042 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5043 tw32_f(MAC_MI_MODE, tp->mi_mode);
5047 tw32_f(MAC_MODE, tp->mac_mode);
5050 tg3_phy_eee_adjust(tp, current_link_up);
5052 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5053 /* Polled via timer. */
5054 tw32_f(MAC_EVENT, 0);
5056 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5060 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5062 tp->link_config.active_speed == SPEED_1000 &&
5063 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5066 (MAC_STATUS_SYNC_CHANGED |
5067 MAC_STATUS_CFG_CHANGED));
5070 NIC_SRAM_FIRMWARE_MBOX,
5071 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5074 /* Prevent send BD corruption. */
5075 if (tg3_flag(tp, CLKREQ_BUG)) {
5076 if (tp->link_config.active_speed == SPEED_100 ||
5077 tp->link_config.active_speed == SPEED_10)
5078 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5079 PCI_EXP_LNKCTL_CLKREQ_EN);
5081 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5082 PCI_EXP_LNKCTL_CLKREQ_EN);
5085 tg3_test_and_report_link_chg(tp, current_link_up);
5090 struct tg3_fiber_aneginfo {
5092 #define ANEG_STATE_UNKNOWN 0
5093 #define ANEG_STATE_AN_ENABLE 1
5094 #define ANEG_STATE_RESTART_INIT 2
5095 #define ANEG_STATE_RESTART 3
5096 #define ANEG_STATE_DISABLE_LINK_OK 4
5097 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5098 #define ANEG_STATE_ABILITY_DETECT 6
5099 #define ANEG_STATE_ACK_DETECT_INIT 7
5100 #define ANEG_STATE_ACK_DETECT 8
5101 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5102 #define ANEG_STATE_COMPLETE_ACK 10
5103 #define ANEG_STATE_IDLE_DETECT_INIT 11
5104 #define ANEG_STATE_IDLE_DETECT 12
5105 #define ANEG_STATE_LINK_OK 13
5106 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5107 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5110 #define MR_AN_ENABLE 0x00000001
5111 #define MR_RESTART_AN 0x00000002
5112 #define MR_AN_COMPLETE 0x00000004
5113 #define MR_PAGE_RX 0x00000008
5114 #define MR_NP_LOADED 0x00000010
5115 #define MR_TOGGLE_TX 0x00000020
5116 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5117 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5118 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5119 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5120 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5121 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5122 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5123 #define MR_TOGGLE_RX 0x00002000
5124 #define MR_NP_RX 0x00004000
5126 #define MR_LINK_OK 0x80000000
5128 unsigned long link_time, cur_time;
5130 u32 ability_match_cfg;
5131 int ability_match_count;
5133 char ability_match, idle_match, ack_match;
5135 u32 txconfig, rxconfig;
5136 #define ANEG_CFG_NP 0x00000080
5137 #define ANEG_CFG_ACK 0x00000040
5138 #define ANEG_CFG_RF2 0x00000020
5139 #define ANEG_CFG_RF1 0x00000010
5140 #define ANEG_CFG_PS2 0x00000001
5141 #define ANEG_CFG_PS1 0x00008000
5142 #define ANEG_CFG_HD 0x00004000
5143 #define ANEG_CFG_FD 0x00002000
5144 #define ANEG_CFG_INVAL 0x00001f06
5149 #define ANEG_TIMER_ENAB 2
5150 #define ANEG_FAILED -1
5152 #define ANEG_STATE_SETTLE_TIME 10000
5154 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5155 struct tg3_fiber_aneginfo *ap)
5158 unsigned long delta;
5162 if (ap->state == ANEG_STATE_UNKNOWN) {
5166 ap->ability_match_cfg = 0;
5167 ap->ability_match_count = 0;
5168 ap->ability_match = 0;
5174 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5175 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5177 if (rx_cfg_reg != ap->ability_match_cfg) {
5178 ap->ability_match_cfg = rx_cfg_reg;
5179 ap->ability_match = 0;
5180 ap->ability_match_count = 0;
5182 if (++ap->ability_match_count > 1) {
5183 ap->ability_match = 1;
5184 ap->ability_match_cfg = rx_cfg_reg;
5187 if (rx_cfg_reg & ANEG_CFG_ACK)
5195 ap->ability_match_cfg = 0;
5196 ap->ability_match_count = 0;
5197 ap->ability_match = 0;
5203 ap->rxconfig = rx_cfg_reg;
5206 switch (ap->state) {
5207 case ANEG_STATE_UNKNOWN:
5208 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5209 ap->state = ANEG_STATE_AN_ENABLE;
5212 case ANEG_STATE_AN_ENABLE:
5213 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5214 if (ap->flags & MR_AN_ENABLE) {
5217 ap->ability_match_cfg = 0;
5218 ap->ability_match_count = 0;
5219 ap->ability_match = 0;
5223 ap->state = ANEG_STATE_RESTART_INIT;
5225 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5229 case ANEG_STATE_RESTART_INIT:
5230 ap->link_time = ap->cur_time;
5231 ap->flags &= ~(MR_NP_LOADED);
5233 tw32(MAC_TX_AUTO_NEG, 0);
5234 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5235 tw32_f(MAC_MODE, tp->mac_mode);
5238 ret = ANEG_TIMER_ENAB;
5239 ap->state = ANEG_STATE_RESTART;
5242 case ANEG_STATE_RESTART:
5243 delta = ap->cur_time - ap->link_time;
5244 if (delta > ANEG_STATE_SETTLE_TIME)
5245 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5247 ret = ANEG_TIMER_ENAB;
5250 case ANEG_STATE_DISABLE_LINK_OK:
5254 case ANEG_STATE_ABILITY_DETECT_INIT:
5255 ap->flags &= ~(MR_TOGGLE_TX);
5256 ap->txconfig = ANEG_CFG_FD;
5257 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5258 if (flowctrl & ADVERTISE_1000XPAUSE)
5259 ap->txconfig |= ANEG_CFG_PS1;
5260 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5261 ap->txconfig |= ANEG_CFG_PS2;
5262 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5263 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5264 tw32_f(MAC_MODE, tp->mac_mode);
5267 ap->state = ANEG_STATE_ABILITY_DETECT;
5270 case ANEG_STATE_ABILITY_DETECT:
5271 if (ap->ability_match != 0 && ap->rxconfig != 0)
5272 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5275 case ANEG_STATE_ACK_DETECT_INIT:
5276 ap->txconfig |= ANEG_CFG_ACK;
5277 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5278 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5279 tw32_f(MAC_MODE, tp->mac_mode);
5282 ap->state = ANEG_STATE_ACK_DETECT;
5285 case ANEG_STATE_ACK_DETECT:
5286 if (ap->ack_match != 0) {
5287 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5288 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5289 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5291 ap->state = ANEG_STATE_AN_ENABLE;
5293 } else if (ap->ability_match != 0 &&
5294 ap->rxconfig == 0) {
5295 ap->state = ANEG_STATE_AN_ENABLE;
5299 case ANEG_STATE_COMPLETE_ACK_INIT:
5300 if (ap->rxconfig & ANEG_CFG_INVAL) {
5304 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5305 MR_LP_ADV_HALF_DUPLEX |
5306 MR_LP_ADV_SYM_PAUSE |
5307 MR_LP_ADV_ASYM_PAUSE |
5308 MR_LP_ADV_REMOTE_FAULT1 |
5309 MR_LP_ADV_REMOTE_FAULT2 |
5310 MR_LP_ADV_NEXT_PAGE |
5313 if (ap->rxconfig & ANEG_CFG_FD)
5314 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5315 if (ap->rxconfig & ANEG_CFG_HD)
5316 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5317 if (ap->rxconfig & ANEG_CFG_PS1)
5318 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5319 if (ap->rxconfig & ANEG_CFG_PS2)
5320 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5321 if (ap->rxconfig & ANEG_CFG_RF1)
5322 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5323 if (ap->rxconfig & ANEG_CFG_RF2)
5324 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5325 if (ap->rxconfig & ANEG_CFG_NP)
5326 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5328 ap->link_time = ap->cur_time;
5330 ap->flags ^= (MR_TOGGLE_TX);
5331 if (ap->rxconfig & 0x0008)
5332 ap->flags |= MR_TOGGLE_RX;
5333 if (ap->rxconfig & ANEG_CFG_NP)
5334 ap->flags |= MR_NP_RX;
5335 ap->flags |= MR_PAGE_RX;
5337 ap->state = ANEG_STATE_COMPLETE_ACK;
5338 ret = ANEG_TIMER_ENAB;
5341 case ANEG_STATE_COMPLETE_ACK:
5342 if (ap->ability_match != 0 &&
5343 ap->rxconfig == 0) {
5344 ap->state = ANEG_STATE_AN_ENABLE;
5347 delta = ap->cur_time - ap->link_time;
5348 if (delta > ANEG_STATE_SETTLE_TIME) {
5349 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5350 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5352 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5353 !(ap->flags & MR_NP_RX)) {
5354 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5362 case ANEG_STATE_IDLE_DETECT_INIT:
5363 ap->link_time = ap->cur_time;
5364 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5365 tw32_f(MAC_MODE, tp->mac_mode);
5368 ap->state = ANEG_STATE_IDLE_DETECT;
5369 ret = ANEG_TIMER_ENAB;
5372 case ANEG_STATE_IDLE_DETECT:
5373 if (ap->ability_match != 0 &&
5374 ap->rxconfig == 0) {
5375 ap->state = ANEG_STATE_AN_ENABLE;
5378 delta = ap->cur_time - ap->link_time;
5379 if (delta > ANEG_STATE_SETTLE_TIME) {
5380 /* XXX another gem from the Broadcom driver :( */
5381 ap->state = ANEG_STATE_LINK_OK;
5385 case ANEG_STATE_LINK_OK:
5386 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5390 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5391 /* ??? unimplemented */
5394 case ANEG_STATE_NEXT_PAGE_WAIT:
5395 /* ??? unimplemented */
5406 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5409 struct tg3_fiber_aneginfo aninfo;
5410 int status = ANEG_FAILED;
5414 tw32_f(MAC_TX_AUTO_NEG, 0);
5416 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5417 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5420 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5423 memset(&aninfo, 0, sizeof(aninfo));
5424 aninfo.flags |= MR_AN_ENABLE;
5425 aninfo.state = ANEG_STATE_UNKNOWN;
5426 aninfo.cur_time = 0;
5428 while (++tick < 195000) {
5429 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5430 if (status == ANEG_DONE || status == ANEG_FAILED)
5436 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5437 tw32_f(MAC_MODE, tp->mac_mode);
5440 *txflags = aninfo.txconfig;
5441 *rxflags = aninfo.flags;
5443 if (status == ANEG_DONE &&
5444 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5445 MR_LP_ADV_FULL_DUPLEX)))
5451 static void tg3_init_bcm8002(struct tg3 *tp)
5453 u32 mac_status = tr32(MAC_STATUS);
5456 /* Reset when initting first time or we have a link. */
5457 if (tg3_flag(tp, INIT_COMPLETE) &&
5458 !(mac_status & MAC_STATUS_PCS_SYNCED))
5461 /* Set PLL lock range. */
5462 tg3_writephy(tp, 0x16, 0x8007);
5465 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5467 /* Wait for reset to complete. */
5468 /* XXX schedule_timeout() ... */
5469 for (i = 0; i < 500; i++)
5472 /* Config mode; select PMA/Ch 1 regs. */
5473 tg3_writephy(tp, 0x10, 0x8411);
5475 /* Enable auto-lock and comdet, select txclk for tx. */
5476 tg3_writephy(tp, 0x11, 0x0a10);
5478 tg3_writephy(tp, 0x18, 0x00a0);
5479 tg3_writephy(tp, 0x16, 0x41ff);
5481 /* Assert and deassert POR. */
5482 tg3_writephy(tp, 0x13, 0x0400);
5484 tg3_writephy(tp, 0x13, 0x0000);
5486 tg3_writephy(tp, 0x11, 0x0a50);
5488 tg3_writephy(tp, 0x11, 0x0a10);
5490 /* Wait for signal to stabilize */
5491 /* XXX schedule_timeout() ... */
5492 for (i = 0; i < 15000; i++)
5495 /* Deselect the channel register so we can read the PHYID
5498 tg3_writephy(tp, 0x10, 0x8011);
5501 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5504 bool current_link_up;
5505 u32 sg_dig_ctrl, sg_dig_status;
5506 u32 serdes_cfg, expected_sg_dig_ctrl;
5507 int workaround, port_a;
5510 expected_sg_dig_ctrl = 0;
5513 current_link_up = false;
5515 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5516 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5518 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5521 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5522 /* preserve bits 20-23 for voltage regulator */
5523 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5526 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5528 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5529 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5531 u32 val = serdes_cfg;
5537 tw32_f(MAC_SERDES_CFG, val);
5540 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5542 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5543 tg3_setup_flow_control(tp, 0, 0);
5544 current_link_up = true;
5549 /* Want auto-negotiation. */
5550 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5552 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5553 if (flowctrl & ADVERTISE_1000XPAUSE)
5554 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5555 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5556 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5558 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5559 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5560 tp->serdes_counter &&
5561 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5562 MAC_STATUS_RCVD_CFG)) ==
5563 MAC_STATUS_PCS_SYNCED)) {
5564 tp->serdes_counter--;
5565 current_link_up = true;
5570 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5571 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5573 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5575 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5576 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5577 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5578 MAC_STATUS_SIGNAL_DET)) {
5579 sg_dig_status = tr32(SG_DIG_STATUS);
5580 mac_status = tr32(MAC_STATUS);
5582 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5583 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5584 u32 local_adv = 0, remote_adv = 0;
5586 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5587 local_adv |= ADVERTISE_1000XPAUSE;
5588 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5589 local_adv |= ADVERTISE_1000XPSE_ASYM;
5591 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5592 remote_adv |= LPA_1000XPAUSE;
5593 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5594 remote_adv |= LPA_1000XPAUSE_ASYM;
5596 tp->link_config.rmt_adv =
5597 mii_adv_to_ethtool_adv_x(remote_adv);
5599 tg3_setup_flow_control(tp, local_adv, remote_adv);
5600 current_link_up = true;
5601 tp->serdes_counter = 0;
5602 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5603 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5604 if (tp->serdes_counter)
5605 tp->serdes_counter--;
5608 u32 val = serdes_cfg;
5615 tw32_f(MAC_SERDES_CFG, val);
5618 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5621 /* Link parallel detection - link is up */
5622 /* only if we have PCS_SYNC and not */
5623 /* receiving config code words */
5624 mac_status = tr32(MAC_STATUS);
5625 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5626 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5627 tg3_setup_flow_control(tp, 0, 0);
5628 current_link_up = true;
5630 TG3_PHYFLG_PARALLEL_DETECT;
5631 tp->serdes_counter =
5632 SERDES_PARALLEL_DET_TIMEOUT;
5634 goto restart_autoneg;
5638 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5639 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5643 return current_link_up;
5646 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5648 bool current_link_up = false;
5650 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5653 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5654 u32 txflags, rxflags;
5657 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5658 u32 local_adv = 0, remote_adv = 0;
5660 if (txflags & ANEG_CFG_PS1)
5661 local_adv |= ADVERTISE_1000XPAUSE;
5662 if (txflags & ANEG_CFG_PS2)
5663 local_adv |= ADVERTISE_1000XPSE_ASYM;
5665 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5666 remote_adv |= LPA_1000XPAUSE;
5667 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5668 remote_adv |= LPA_1000XPAUSE_ASYM;
5670 tp->link_config.rmt_adv =
5671 mii_adv_to_ethtool_adv_x(remote_adv);
5673 tg3_setup_flow_control(tp, local_adv, remote_adv);
5675 current_link_up = true;
5677 for (i = 0; i < 30; i++) {
5680 (MAC_STATUS_SYNC_CHANGED |
5681 MAC_STATUS_CFG_CHANGED));
5683 if ((tr32(MAC_STATUS) &
5684 (MAC_STATUS_SYNC_CHANGED |
5685 MAC_STATUS_CFG_CHANGED)) == 0)
5689 mac_status = tr32(MAC_STATUS);
5690 if (!current_link_up &&
5691 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5692 !(mac_status & MAC_STATUS_RCVD_CFG))
5693 current_link_up = true;
5695 tg3_setup_flow_control(tp, 0, 0);
5697 /* Forcing 1000FD link up. */
5698 current_link_up = true;
5700 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5703 tw32_f(MAC_MODE, tp->mac_mode);
5708 return current_link_up;
5711 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5714 u16 orig_active_speed;
5715 u8 orig_active_duplex;
5717 bool current_link_up;
5720 orig_pause_cfg = tp->link_config.active_flowctrl;
5721 orig_active_speed = tp->link_config.active_speed;
5722 orig_active_duplex = tp->link_config.active_duplex;
5724 if (!tg3_flag(tp, HW_AUTONEG) &&
5726 tg3_flag(tp, INIT_COMPLETE)) {
5727 mac_status = tr32(MAC_STATUS);
5728 mac_status &= (MAC_STATUS_PCS_SYNCED |
5729 MAC_STATUS_SIGNAL_DET |
5730 MAC_STATUS_CFG_CHANGED |
5731 MAC_STATUS_RCVD_CFG);
5732 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5733 MAC_STATUS_SIGNAL_DET)) {
5734 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5735 MAC_STATUS_CFG_CHANGED));
5740 tw32_f(MAC_TX_AUTO_NEG, 0);
5742 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5743 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5744 tw32_f(MAC_MODE, tp->mac_mode);
5747 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5748 tg3_init_bcm8002(tp);
5750 /* Enable link change event even when serdes polling. */
5751 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5754 current_link_up = false;
5755 tp->link_config.rmt_adv = 0;
5756 mac_status = tr32(MAC_STATUS);
5758 if (tg3_flag(tp, HW_AUTONEG))
5759 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5761 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5763 tp->napi[0].hw_status->status =
5764 (SD_STATUS_UPDATED |
5765 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5767 for (i = 0; i < 100; i++) {
5768 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5769 MAC_STATUS_CFG_CHANGED));
5771 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5772 MAC_STATUS_CFG_CHANGED |
5773 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5777 mac_status = tr32(MAC_STATUS);
5778 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5779 current_link_up = false;
5780 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5781 tp->serdes_counter == 0) {
5782 tw32_f(MAC_MODE, (tp->mac_mode |
5783 MAC_MODE_SEND_CONFIGS));
5785 tw32_f(MAC_MODE, tp->mac_mode);
5789 if (current_link_up) {
5790 tp->link_config.active_speed = SPEED_1000;
5791 tp->link_config.active_duplex = DUPLEX_FULL;
5792 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5793 LED_CTRL_LNKLED_OVERRIDE |
5794 LED_CTRL_1000MBPS_ON));
5796 tp->link_config.active_speed = SPEED_UNKNOWN;
5797 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5798 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5799 LED_CTRL_LNKLED_OVERRIDE |
5800 LED_CTRL_TRAFFIC_OVERRIDE));
5803 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5804 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5805 if (orig_pause_cfg != now_pause_cfg ||
5806 orig_active_speed != tp->link_config.active_speed ||
5807 orig_active_duplex != tp->link_config.active_duplex)
5808 tg3_link_report(tp);
5814 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5818 u16 current_speed = SPEED_UNKNOWN;
5819 u8 current_duplex = DUPLEX_UNKNOWN;
5820 bool current_link_up = false;
5821 u32 local_adv, remote_adv, sgsr;
5823 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5824 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5825 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5826 (sgsr & SERDES_TG3_SGMII_MODE)) {
5831 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5833 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5834 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5836 current_link_up = true;
5837 if (sgsr & SERDES_TG3_SPEED_1000) {
5838 current_speed = SPEED_1000;
5839 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5840 } else if (sgsr & SERDES_TG3_SPEED_100) {
5841 current_speed = SPEED_100;
5842 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5844 current_speed = SPEED_10;
5845 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5848 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5849 current_duplex = DUPLEX_FULL;
5851 current_duplex = DUPLEX_HALF;
5854 tw32_f(MAC_MODE, tp->mac_mode);
5857 tg3_clear_mac_status(tp);
5859 goto fiber_setup_done;
5862 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5863 tw32_f(MAC_MODE, tp->mac_mode);
5866 tg3_clear_mac_status(tp);
5871 tp->link_config.rmt_adv = 0;
5873 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5874 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5875 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5876 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5877 bmsr |= BMSR_LSTATUS;
5879 bmsr &= ~BMSR_LSTATUS;
5882 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5884 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5885 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5886 /* do nothing, just check for link up at the end */
5887 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5890 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5891 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5892 ADVERTISE_1000XPAUSE |
5893 ADVERTISE_1000XPSE_ASYM |
5896 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5897 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5899 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5900 tg3_writephy(tp, MII_ADVERTISE, newadv);
5901 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5902 tg3_writephy(tp, MII_BMCR, bmcr);
5904 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5905 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5906 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5913 bmcr &= ~BMCR_SPEED1000;
5914 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5916 if (tp->link_config.duplex == DUPLEX_FULL)
5917 new_bmcr |= BMCR_FULLDPLX;
5919 if (new_bmcr != bmcr) {
5920 /* BMCR_SPEED1000 is a reserved bit that needs
5921 * to be set on write.
5923 new_bmcr |= BMCR_SPEED1000;
5925 /* Force a linkdown */
5929 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5930 adv &= ~(ADVERTISE_1000XFULL |
5931 ADVERTISE_1000XHALF |
5933 tg3_writephy(tp, MII_ADVERTISE, adv);
5934 tg3_writephy(tp, MII_BMCR, bmcr |
5938 tg3_carrier_off(tp);
5940 tg3_writephy(tp, MII_BMCR, new_bmcr);
5942 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5943 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5944 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5945 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5946 bmsr |= BMSR_LSTATUS;
5948 bmsr &= ~BMSR_LSTATUS;
5950 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5954 if (bmsr & BMSR_LSTATUS) {
5955 current_speed = SPEED_1000;
5956 current_link_up = true;
5957 if (bmcr & BMCR_FULLDPLX)
5958 current_duplex = DUPLEX_FULL;
5960 current_duplex = DUPLEX_HALF;
5965 if (bmcr & BMCR_ANENABLE) {
5968 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5969 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5970 common = local_adv & remote_adv;
5971 if (common & (ADVERTISE_1000XHALF |
5972 ADVERTISE_1000XFULL)) {
5973 if (common & ADVERTISE_1000XFULL)
5974 current_duplex = DUPLEX_FULL;
5976 current_duplex = DUPLEX_HALF;
5978 tp->link_config.rmt_adv =
5979 mii_adv_to_ethtool_adv_x(remote_adv);
5980 } else if (!tg3_flag(tp, 5780_CLASS)) {
5981 /* Link is up via parallel detect */
5983 current_link_up = false;
5989 if (current_link_up && current_duplex == DUPLEX_FULL)
5990 tg3_setup_flow_control(tp, local_adv, remote_adv);
5992 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5993 if (tp->link_config.active_duplex == DUPLEX_HALF)
5994 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5996 tw32_f(MAC_MODE, tp->mac_mode);
5999 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
6001 tp->link_config.active_speed = current_speed;
6002 tp->link_config.active_duplex = current_duplex;
6004 tg3_test_and_report_link_chg(tp, current_link_up);
6008 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6010 if (tp->serdes_counter) {
6011 /* Give autoneg time to complete. */
6012 tp->serdes_counter--;
6017 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6020 tg3_readphy(tp, MII_BMCR, &bmcr);
6021 if (bmcr & BMCR_ANENABLE) {
6024 /* Select shadow register 0x1f */
6025 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6026 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6028 /* Select expansion interrupt status register */
6029 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6030 MII_TG3_DSP_EXP1_INT_STAT);
6031 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6032 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6034 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6035 /* We have signal detect and not receiving
6036 * config code words, link is up by parallel
6040 bmcr &= ~BMCR_ANENABLE;
6041 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6042 tg3_writephy(tp, MII_BMCR, bmcr);
6043 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6046 } else if (tp->link_up &&
6047 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6048 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6051 /* Select expansion interrupt status register */
6052 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6053 MII_TG3_DSP_EXP1_INT_STAT);
6054 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6058 /* Config code words received, turn on autoneg. */
6059 tg3_readphy(tp, MII_BMCR, &bmcr);
6060 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6062 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6068 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6073 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6074 err = tg3_setup_fiber_phy(tp, force_reset);
6075 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6076 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6078 err = tg3_setup_copper_phy(tp, force_reset);
6080 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6083 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6084 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6086 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6091 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6092 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6093 tw32(GRC_MISC_CFG, val);
6096 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6097 (6 << TX_LENGTHS_IPG_SHIFT);
6098 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6099 tg3_asic_rev(tp) == ASIC_REV_5762)
6100 val |= tr32(MAC_TX_LENGTHS) &
6101 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6102 TX_LENGTHS_CNT_DWN_VAL_MSK);
6104 if (tp->link_config.active_speed == SPEED_1000 &&
6105 tp->link_config.active_duplex == DUPLEX_HALF)
6106 tw32(MAC_TX_LENGTHS, val |
6107 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6109 tw32(MAC_TX_LENGTHS, val |
6110 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6112 if (!tg3_flag(tp, 5705_PLUS)) {
6114 tw32(HOSTCC_STAT_COAL_TICKS,
6115 tp->coal.stats_block_coalesce_usecs);
6117 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6121 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6122 val = tr32(PCIE_PWR_MGMT_THRESH);
6124 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6127 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6128 tw32(PCIE_PWR_MGMT_THRESH, val);
6134 /* tp->lock must be held */
6135 static u64 tg3_refclk_read(struct tg3 *tp)
6137 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6138 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6141 /* tp->lock must be held */
6142 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6144 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6146 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6147 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6148 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6149 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6152 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6153 static inline void tg3_full_unlock(struct tg3 *tp);
6154 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6156 struct tg3 *tp = netdev_priv(dev);
6158 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6159 SOF_TIMESTAMPING_RX_SOFTWARE |
6160 SOF_TIMESTAMPING_SOFTWARE;
6162 if (tg3_flag(tp, PTP_CAPABLE)) {
6163 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6164 SOF_TIMESTAMPING_RX_HARDWARE |
6165 SOF_TIMESTAMPING_RAW_HARDWARE;
6169 info->phc_index = ptp_clock_index(tp->ptp_clock);
6171 info->phc_index = -1;
6173 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6175 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6176 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6177 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6178 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6182 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6184 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6185 bool neg_adj = false;
6193 /* Frequency adjustment is performed using hardware with a 24 bit
6194 * accumulator and a programmable correction value. On each clk, the
6195 * correction value gets added to the accumulator and when it
6196 * overflows, the time counter is incremented/decremented.
6198 * So conversion from ppb to correction value is
6199 * ppb * (1 << 24) / 1000000000
6201 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6202 TG3_EAV_REF_CLK_CORRECT_MASK;
6204 tg3_full_lock(tp, 0);
6207 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6208 TG3_EAV_REF_CLK_CORRECT_EN |
6209 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6211 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6213 tg3_full_unlock(tp);
6218 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6220 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6222 tg3_full_lock(tp, 0);
6223 tp->ptp_adjust += delta;
6224 tg3_full_unlock(tp);
6229 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6232 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6234 tg3_full_lock(tp, 0);
6235 ns = tg3_refclk_read(tp);
6236 ns += tp->ptp_adjust;
6237 tg3_full_unlock(tp);
6239 *ts = ns_to_timespec64(ns);
6244 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6245 const struct timespec64 *ts)
6248 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6250 ns = timespec64_to_ns(ts);
6252 tg3_full_lock(tp, 0);
6253 tg3_refclk_write(tp, ns);
6255 tg3_full_unlock(tp);
6260 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6261 struct ptp_clock_request *rq, int on)
6263 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6268 case PTP_CLK_REQ_PEROUT:
6269 if (rq->perout.index != 0)
6272 tg3_full_lock(tp, 0);
6273 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6274 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6279 nsec = rq->perout.start.sec * 1000000000ULL +
6280 rq->perout.start.nsec;
6282 if (rq->perout.period.sec || rq->perout.period.nsec) {
6283 netdev_warn(tp->dev,
6284 "Device supports only a one-shot timesync output, period must be 0\n");
6289 if (nsec & (1ULL << 63)) {
6290 netdev_warn(tp->dev,
6291 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6296 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6297 tw32(TG3_EAV_WATCHDOG0_MSB,
6298 TG3_EAV_WATCHDOG0_EN |
6299 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6301 tw32(TG3_EAV_REF_CLCK_CTL,
6302 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6304 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6305 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6309 tg3_full_unlock(tp);
6319 static const struct ptp_clock_info tg3_ptp_caps = {
6320 .owner = THIS_MODULE,
6321 .name = "tg3 clock",
6322 .max_adj = 250000000,
6328 .adjfreq = tg3_ptp_adjfreq,
6329 .adjtime = tg3_ptp_adjtime,
6330 .gettime64 = tg3_ptp_gettime,
6331 .settime64 = tg3_ptp_settime,
6332 .enable = tg3_ptp_enable,
6335 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6336 struct skb_shared_hwtstamps *timestamp)
6338 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6339 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6343 /* tp->lock must be held */
6344 static void tg3_ptp_init(struct tg3 *tp)
6346 if (!tg3_flag(tp, PTP_CAPABLE))
6349 /* Initialize the hardware clock to the system time. */
6350 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6352 tp->ptp_info = tg3_ptp_caps;
6355 /* tp->lock must be held */
6356 static void tg3_ptp_resume(struct tg3 *tp)
6358 if (!tg3_flag(tp, PTP_CAPABLE))
6361 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6365 static void tg3_ptp_fini(struct tg3 *tp)
6367 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6370 ptp_clock_unregister(tp->ptp_clock);
6371 tp->ptp_clock = NULL;
6375 static inline int tg3_irq_sync(struct tg3 *tp)
6377 return tp->irq_sync;
6380 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6384 dst = (u32 *)((u8 *)dst + off);
6385 for (i = 0; i < len; i += sizeof(u32))
6386 *dst++ = tr32(off + i);
6389 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6391 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6392 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6393 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6394 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6395 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6396 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6397 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6398 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6399 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6400 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6401 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6402 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6403 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6404 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6405 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6406 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6407 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6408 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6409 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6411 if (tg3_flag(tp, SUPPORT_MSIX))
6412 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6414 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6415 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6416 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6417 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6418 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6419 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6420 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6421 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6423 if (!tg3_flag(tp, 5705_PLUS)) {
6424 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6425 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6426 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6429 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6430 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6431 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6432 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6433 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6435 if (tg3_flag(tp, NVRAM))
6436 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6439 static void tg3_dump_state(struct tg3 *tp)
6444 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6448 if (tg3_flag(tp, PCI_EXPRESS)) {
6449 /* Read up to but not including private PCI registers */
6450 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6451 regs[i / sizeof(u32)] = tr32(i);
6453 tg3_dump_legacy_regs(tp, regs);
6455 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6456 if (!regs[i + 0] && !regs[i + 1] &&
6457 !regs[i + 2] && !regs[i + 3])
6460 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6462 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6467 for (i = 0; i < tp->irq_cnt; i++) {
6468 struct tg3_napi *tnapi = &tp->napi[i];
6470 /* SW status block */
6472 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6474 tnapi->hw_status->status,
6475 tnapi->hw_status->status_tag,
6476 tnapi->hw_status->rx_jumbo_consumer,
6477 tnapi->hw_status->rx_consumer,
6478 tnapi->hw_status->rx_mini_consumer,
6479 tnapi->hw_status->idx[0].rx_producer,
6480 tnapi->hw_status->idx[0].tx_consumer);
6483 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6485 tnapi->last_tag, tnapi->last_irq_tag,
6486 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6488 tnapi->prodring.rx_std_prod_idx,
6489 tnapi->prodring.rx_std_cons_idx,
6490 tnapi->prodring.rx_jmb_prod_idx,
6491 tnapi->prodring.rx_jmb_cons_idx);
6495 /* This is called whenever we suspect that the system chipset is re-
6496 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6497 * is bogus tx completions. We try to recover by setting the
6498 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6501 static void tg3_tx_recover(struct tg3 *tp)
6503 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6504 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6506 netdev_warn(tp->dev,
6507 "The system may be re-ordering memory-mapped I/O "
6508 "cycles to the network device, attempting to recover. "
6509 "Please report the problem to the driver maintainer "
6510 "and include system chipset information.\n");
6512 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6515 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6517 /* Tell compiler to fetch tx indices from memory. */
6519 return tnapi->tx_pending -
6520 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6523 /* Tigon3 never reports partial packet sends. So we do not
6524 * need special logic to handle SKBs that have not had all
6525 * of their frags sent yet, like SunGEM does.
6527 static void tg3_tx(struct tg3_napi *tnapi)
6529 struct tg3 *tp = tnapi->tp;
6530 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6531 u32 sw_idx = tnapi->tx_cons;
6532 struct netdev_queue *txq;
6533 int index = tnapi - tp->napi;
6534 unsigned int pkts_compl = 0, bytes_compl = 0;
6536 if (tg3_flag(tp, ENABLE_TSS))
6539 txq = netdev_get_tx_queue(tp->dev, index);
6541 while (sw_idx != hw_idx) {
6542 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6543 struct sk_buff *skb = ri->skb;
6546 if (unlikely(skb == NULL)) {
6551 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6552 struct skb_shared_hwtstamps timestamp;
6553 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6554 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6556 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp);
6558 skb_tstamp_tx(skb, ×tamp);
6561 pci_unmap_single(tp->pdev,
6562 dma_unmap_addr(ri, mapping),
6568 while (ri->fragmented) {
6569 ri->fragmented = false;
6570 sw_idx = NEXT_TX(sw_idx);
6571 ri = &tnapi->tx_buffers[sw_idx];
6574 sw_idx = NEXT_TX(sw_idx);
6576 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6577 ri = &tnapi->tx_buffers[sw_idx];
6578 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6581 pci_unmap_page(tp->pdev,
6582 dma_unmap_addr(ri, mapping),
6583 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6586 while (ri->fragmented) {
6587 ri->fragmented = false;
6588 sw_idx = NEXT_TX(sw_idx);
6589 ri = &tnapi->tx_buffers[sw_idx];
6592 sw_idx = NEXT_TX(sw_idx);
6596 bytes_compl += skb->len;
6598 dev_consume_skb_any(skb);
6600 if (unlikely(tx_bug)) {
6606 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6608 tnapi->tx_cons = sw_idx;
6610 /* Need to make the tx_cons update visible to tg3_start_xmit()
6611 * before checking for netif_queue_stopped(). Without the
6612 * memory barrier, there is a small possibility that tg3_start_xmit()
6613 * will miss it and cause the queue to be stopped forever.
6617 if (unlikely(netif_tx_queue_stopped(txq) &&
6618 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6619 __netif_tx_lock(txq, smp_processor_id());
6620 if (netif_tx_queue_stopped(txq) &&
6621 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6622 netif_tx_wake_queue(txq);
6623 __netif_tx_unlock(txq);
6627 static void tg3_frag_free(bool is_frag, void *data)
6630 skb_free_frag(data);
6635 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6637 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6638 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6643 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6644 map_sz, PCI_DMA_FROMDEVICE);
6645 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6650 /* Returns size of skb allocated or < 0 on error.
6652 * We only need to fill in the address because the other members
6653 * of the RX descriptor are invariant, see tg3_init_rings.
6655 * Note the purposeful assymetry of cpu vs. chip accesses. For
6656 * posting buffers we only dirty the first cache line of the RX
6657 * descriptor (containing the address). Whereas for the RX status
6658 * buffers the cpu only reads the last cacheline of the RX descriptor
6659 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6661 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6662 u32 opaque_key, u32 dest_idx_unmasked,
6663 unsigned int *frag_size)
6665 struct tg3_rx_buffer_desc *desc;
6666 struct ring_info *map;
6669 int skb_size, data_size, dest_idx;
6671 switch (opaque_key) {
6672 case RXD_OPAQUE_RING_STD:
6673 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6674 desc = &tpr->rx_std[dest_idx];
6675 map = &tpr->rx_std_buffers[dest_idx];
6676 data_size = tp->rx_pkt_map_sz;
6679 case RXD_OPAQUE_RING_JUMBO:
6680 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6681 desc = &tpr->rx_jmb[dest_idx].std;
6682 map = &tpr->rx_jmb_buffers[dest_idx];
6683 data_size = TG3_RX_JMB_MAP_SZ;
6690 /* Do not overwrite any of the map or rp information
6691 * until we are sure we can commit to a new buffer.
6693 * Callers depend upon this behavior and assume that
6694 * we leave everything unchanged if we fail.
6696 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6697 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6698 if (skb_size <= PAGE_SIZE) {
6699 data = netdev_alloc_frag(skb_size);
6700 *frag_size = skb_size;
6702 data = kmalloc(skb_size, GFP_ATOMIC);
6708 mapping = pci_map_single(tp->pdev,
6709 data + TG3_RX_OFFSET(tp),
6711 PCI_DMA_FROMDEVICE);
6712 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6713 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6718 dma_unmap_addr_set(map, mapping, mapping);
6720 desc->addr_hi = ((u64)mapping >> 32);
6721 desc->addr_lo = ((u64)mapping & 0xffffffff);
6726 /* We only need to move over in the address because the other
6727 * members of the RX descriptor are invariant. See notes above
6728 * tg3_alloc_rx_data for full details.
6730 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6731 struct tg3_rx_prodring_set *dpr,
6732 u32 opaque_key, int src_idx,
6733 u32 dest_idx_unmasked)
6735 struct tg3 *tp = tnapi->tp;
6736 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6737 struct ring_info *src_map, *dest_map;
6738 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6741 switch (opaque_key) {
6742 case RXD_OPAQUE_RING_STD:
6743 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6744 dest_desc = &dpr->rx_std[dest_idx];
6745 dest_map = &dpr->rx_std_buffers[dest_idx];
6746 src_desc = &spr->rx_std[src_idx];
6747 src_map = &spr->rx_std_buffers[src_idx];
6750 case RXD_OPAQUE_RING_JUMBO:
6751 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6752 dest_desc = &dpr->rx_jmb[dest_idx].std;
6753 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6754 src_desc = &spr->rx_jmb[src_idx].std;
6755 src_map = &spr->rx_jmb_buffers[src_idx];
6762 dest_map->data = src_map->data;
6763 dma_unmap_addr_set(dest_map, mapping,
6764 dma_unmap_addr(src_map, mapping));
6765 dest_desc->addr_hi = src_desc->addr_hi;
6766 dest_desc->addr_lo = src_desc->addr_lo;
6768 /* Ensure that the update to the skb happens after the physical
6769 * addresses have been transferred to the new BD location.
6773 src_map->data = NULL;
6776 /* The RX ring scheme is composed of multiple rings which post fresh
6777 * buffers to the chip, and one special ring the chip uses to report
6778 * status back to the host.
6780 * The special ring reports the status of received packets to the
6781 * host. The chip does not write into the original descriptor the
6782 * RX buffer was obtained from. The chip simply takes the original
6783 * descriptor as provided by the host, updates the status and length
6784 * field, then writes this into the next status ring entry.
6786 * Each ring the host uses to post buffers to the chip is described
6787 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6788 * it is first placed into the on-chip ram. When the packet's length
6789 * is known, it walks down the TG3_BDINFO entries to select the ring.
6790 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6791 * which is within the range of the new packet's length is chosen.
6793 * The "separate ring for rx status" scheme may sound queer, but it makes
6794 * sense from a cache coherency perspective. If only the host writes
6795 * to the buffer post rings, and only the chip writes to the rx status
6796 * rings, then cache lines never move beyond shared-modified state.
6797 * If both the host and chip were to write into the same ring, cache line
6798 * eviction could occur since both entities want it in an exclusive state.
6800 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6802 struct tg3 *tp = tnapi->tp;
6803 u32 work_mask, rx_std_posted = 0;
6804 u32 std_prod_idx, jmb_prod_idx;
6805 u32 sw_idx = tnapi->rx_rcb_ptr;
6808 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6810 hw_idx = *(tnapi->rx_rcb_prod_idx);
6812 * We need to order the read of hw_idx and the read of
6813 * the opaque cookie.
6818 std_prod_idx = tpr->rx_std_prod_idx;
6819 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6820 while (sw_idx != hw_idx && budget > 0) {
6821 struct ring_info *ri;
6822 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6824 struct sk_buff *skb;
6825 dma_addr_t dma_addr;
6826 u32 opaque_key, desc_idx, *post_ptr;
6830 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6831 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6832 if (opaque_key == RXD_OPAQUE_RING_STD) {
6833 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6834 dma_addr = dma_unmap_addr(ri, mapping);
6836 post_ptr = &std_prod_idx;
6838 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6839 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6840 dma_addr = dma_unmap_addr(ri, mapping);
6842 post_ptr = &jmb_prod_idx;
6844 goto next_pkt_nopost;
6846 work_mask |= opaque_key;
6848 if (desc->err_vlan & RXD_ERR_MASK) {
6850 tg3_recycle_rx(tnapi, tpr, opaque_key,
6851 desc_idx, *post_ptr);
6853 /* Other statistics kept track of by card. */
6858 prefetch(data + TG3_RX_OFFSET(tp));
6859 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6862 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6863 RXD_FLAG_PTPSTAT_PTPV1 ||
6864 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6865 RXD_FLAG_PTPSTAT_PTPV2) {
6866 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6867 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6870 if (len > TG3_RX_COPY_THRESH(tp)) {
6872 unsigned int frag_size;
6874 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6875 *post_ptr, &frag_size);
6879 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6880 PCI_DMA_FROMDEVICE);
6882 /* Ensure that the update to the data happens
6883 * after the usage of the old DMA mapping.
6889 skb = build_skb(data, frag_size);
6891 tg3_frag_free(frag_size != 0, data);
6892 goto drop_it_no_recycle;
6894 skb_reserve(skb, TG3_RX_OFFSET(tp));
6896 tg3_recycle_rx(tnapi, tpr, opaque_key,
6897 desc_idx, *post_ptr);
6899 skb = netdev_alloc_skb(tp->dev,
6900 len + TG3_RAW_IP_ALIGN);
6902 goto drop_it_no_recycle;
6904 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6905 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6907 data + TG3_RX_OFFSET(tp),
6909 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6914 tg3_hwclock_to_timestamp(tp, tstamp,
6915 skb_hwtstamps(skb));
6917 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6918 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6919 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6920 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6921 skb->ip_summed = CHECKSUM_UNNECESSARY;
6923 skb_checksum_none_assert(skb);
6925 skb->protocol = eth_type_trans(skb, tp->dev);
6927 if (len > (tp->dev->mtu + ETH_HLEN) &&
6928 skb->protocol != htons(ETH_P_8021Q) &&
6929 skb->protocol != htons(ETH_P_8021AD)) {
6930 dev_kfree_skb_any(skb);
6931 goto drop_it_no_recycle;
6934 if (desc->type_flags & RXD_FLAG_VLAN &&
6935 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6936 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6937 desc->err_vlan & RXD_VLAN_MASK);
6939 napi_gro_receive(&tnapi->napi, skb);
6947 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6948 tpr->rx_std_prod_idx = std_prod_idx &
6949 tp->rx_std_ring_mask;
6950 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6951 tpr->rx_std_prod_idx);
6952 work_mask &= ~RXD_OPAQUE_RING_STD;
6957 sw_idx &= tp->rx_ret_ring_mask;
6959 /* Refresh hw_idx to see if there is new work */
6960 if (sw_idx == hw_idx) {
6961 hw_idx = *(tnapi->rx_rcb_prod_idx);
6966 /* ACK the status ring. */
6967 tnapi->rx_rcb_ptr = sw_idx;
6968 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6970 /* Refill RX ring(s). */
6971 if (!tg3_flag(tp, ENABLE_RSS)) {
6972 /* Sync BD data before updating mailbox */
6975 if (work_mask & RXD_OPAQUE_RING_STD) {
6976 tpr->rx_std_prod_idx = std_prod_idx &
6977 tp->rx_std_ring_mask;
6978 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6979 tpr->rx_std_prod_idx);
6981 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6982 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6983 tp->rx_jmb_ring_mask;
6984 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6985 tpr->rx_jmb_prod_idx);
6988 } else if (work_mask) {
6989 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6990 * updated before the producer indices can be updated.
6994 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6995 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6997 if (tnapi != &tp->napi[1]) {
6998 tp->rx_refill = true;
6999 napi_schedule(&tp->napi[1].napi);
7006 static void tg3_poll_link(struct tg3 *tp)
7008 /* handle link change and other phy events */
7009 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7010 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7012 if (sblk->status & SD_STATUS_LINK_CHG) {
7013 sblk->status = SD_STATUS_UPDATED |
7014 (sblk->status & ~SD_STATUS_LINK_CHG);
7015 spin_lock(&tp->lock);
7016 if (tg3_flag(tp, USE_PHYLIB)) {
7018 (MAC_STATUS_SYNC_CHANGED |
7019 MAC_STATUS_CFG_CHANGED |
7020 MAC_STATUS_MI_COMPLETION |
7021 MAC_STATUS_LNKSTATE_CHANGED));
7024 tg3_setup_phy(tp, false);
7025 spin_unlock(&tp->lock);
7030 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7031 struct tg3_rx_prodring_set *dpr,
7032 struct tg3_rx_prodring_set *spr)
7034 u32 si, di, cpycnt, src_prod_idx;
7038 src_prod_idx = spr->rx_std_prod_idx;
7040 /* Make sure updates to the rx_std_buffers[] entries and the
7041 * standard producer index are seen in the correct order.
7045 if (spr->rx_std_cons_idx == src_prod_idx)
7048 if (spr->rx_std_cons_idx < src_prod_idx)
7049 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7051 cpycnt = tp->rx_std_ring_mask + 1 -
7052 spr->rx_std_cons_idx;
7054 cpycnt = min(cpycnt,
7055 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7057 si = spr->rx_std_cons_idx;
7058 di = dpr->rx_std_prod_idx;
7060 for (i = di; i < di + cpycnt; i++) {
7061 if (dpr->rx_std_buffers[i].data) {
7071 /* Ensure that updates to the rx_std_buffers ring and the
7072 * shadowed hardware producer ring from tg3_recycle_skb() are
7073 * ordered correctly WRT the skb check above.
7077 memcpy(&dpr->rx_std_buffers[di],
7078 &spr->rx_std_buffers[si],
7079 cpycnt * sizeof(struct ring_info));
7081 for (i = 0; i < cpycnt; i++, di++, si++) {
7082 struct tg3_rx_buffer_desc *sbd, *dbd;
7083 sbd = &spr->rx_std[si];
7084 dbd = &dpr->rx_std[di];
7085 dbd->addr_hi = sbd->addr_hi;
7086 dbd->addr_lo = sbd->addr_lo;
7089 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7090 tp->rx_std_ring_mask;
7091 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7092 tp->rx_std_ring_mask;
7096 src_prod_idx = spr->rx_jmb_prod_idx;
7098 /* Make sure updates to the rx_jmb_buffers[] entries and
7099 * the jumbo producer index are seen in the correct order.
7103 if (spr->rx_jmb_cons_idx == src_prod_idx)
7106 if (spr->rx_jmb_cons_idx < src_prod_idx)
7107 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7109 cpycnt = tp->rx_jmb_ring_mask + 1 -
7110 spr->rx_jmb_cons_idx;
7112 cpycnt = min(cpycnt,
7113 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7115 si = spr->rx_jmb_cons_idx;
7116 di = dpr->rx_jmb_prod_idx;
7118 for (i = di; i < di + cpycnt; i++) {
7119 if (dpr->rx_jmb_buffers[i].data) {
7129 /* Ensure that updates to the rx_jmb_buffers ring and the
7130 * shadowed hardware producer ring from tg3_recycle_skb() are
7131 * ordered correctly WRT the skb check above.
7135 memcpy(&dpr->rx_jmb_buffers[di],
7136 &spr->rx_jmb_buffers[si],
7137 cpycnt * sizeof(struct ring_info));
7139 for (i = 0; i < cpycnt; i++, di++, si++) {
7140 struct tg3_rx_buffer_desc *sbd, *dbd;
7141 sbd = &spr->rx_jmb[si].std;
7142 dbd = &dpr->rx_jmb[di].std;
7143 dbd->addr_hi = sbd->addr_hi;
7144 dbd->addr_lo = sbd->addr_lo;
7147 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7148 tp->rx_jmb_ring_mask;
7149 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7150 tp->rx_jmb_ring_mask;
7156 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7158 struct tg3 *tp = tnapi->tp;
7160 /* run TX completion thread */
7161 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7163 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7167 if (!tnapi->rx_rcb_prod_idx)
7170 /* run RX thread, within the bounds set by NAPI.
7171 * All RX "locking" is done by ensuring outside
7172 * code synchronizes with tg3->napi.poll()
7174 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7175 work_done += tg3_rx(tnapi, budget - work_done);
7177 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7178 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7180 u32 std_prod_idx = dpr->rx_std_prod_idx;
7181 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7183 tp->rx_refill = false;
7184 for (i = 1; i <= tp->rxq_cnt; i++)
7185 err |= tg3_rx_prodring_xfer(tp, dpr,
7186 &tp->napi[i].prodring);
7190 if (std_prod_idx != dpr->rx_std_prod_idx)
7191 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7192 dpr->rx_std_prod_idx);
7194 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7195 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7196 dpr->rx_jmb_prod_idx);
7201 tw32_f(HOSTCC_MODE, tp->coal_now);
7207 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7209 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7210 schedule_work(&tp->reset_task);
7213 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7215 cancel_work_sync(&tp->reset_task);
7216 tg3_flag_clear(tp, RESET_TASK_PENDING);
7217 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7220 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7222 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7223 struct tg3 *tp = tnapi->tp;
7225 struct tg3_hw_status *sblk = tnapi->hw_status;
7228 work_done = tg3_poll_work(tnapi, work_done, budget);
7230 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7233 if (unlikely(work_done >= budget))
7236 /* tp->last_tag is used in tg3_int_reenable() below
7237 * to tell the hw how much work has been processed,
7238 * so we must read it before checking for more work.
7240 tnapi->last_tag = sblk->status_tag;
7241 tnapi->last_irq_tag = tnapi->last_tag;
7244 /* check for RX/TX work to do */
7245 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7246 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7248 /* This test here is not race free, but will reduce
7249 * the number of interrupts by looping again.
7251 if (tnapi == &tp->napi[1] && tp->rx_refill)
7254 napi_complete_done(napi, work_done);
7255 /* Reenable interrupts. */
7256 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7258 /* This test here is synchronized by napi_schedule()
7259 * and napi_complete() to close the race condition.
7261 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7262 tw32(HOSTCC_MODE, tp->coalesce_mode |
7263 HOSTCC_MODE_ENABLE |
7271 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7275 /* work_done is guaranteed to be less than budget. */
7276 napi_complete(napi);
7277 tg3_reset_task_schedule(tp);
7281 static void tg3_process_error(struct tg3 *tp)
7284 bool real_error = false;
7286 if (tg3_flag(tp, ERROR_PROCESSED))
7289 /* Check Flow Attention register */
7290 val = tr32(HOSTCC_FLOW_ATTN);
7291 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7292 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7296 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7297 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7301 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7302 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7311 tg3_flag_set(tp, ERROR_PROCESSED);
7312 tg3_reset_task_schedule(tp);
7315 static int tg3_poll(struct napi_struct *napi, int budget)
7317 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7318 struct tg3 *tp = tnapi->tp;
7320 struct tg3_hw_status *sblk = tnapi->hw_status;
7323 if (sblk->status & SD_STATUS_ERROR)
7324 tg3_process_error(tp);
7328 work_done = tg3_poll_work(tnapi, work_done, budget);
7330 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7333 if (unlikely(work_done >= budget))
7336 if (tg3_flag(tp, TAGGED_STATUS)) {
7337 /* tp->last_tag is used in tg3_int_reenable() below
7338 * to tell the hw how much work has been processed,
7339 * so we must read it before checking for more work.
7341 tnapi->last_tag = sblk->status_tag;
7342 tnapi->last_irq_tag = tnapi->last_tag;
7345 sblk->status &= ~SD_STATUS_UPDATED;
7347 if (likely(!tg3_has_work(tnapi))) {
7348 napi_complete_done(napi, work_done);
7349 tg3_int_reenable(tnapi);
7354 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7358 /* work_done is guaranteed to be less than budget. */
7359 napi_complete(napi);
7360 tg3_reset_task_schedule(tp);
7364 static void tg3_napi_disable(struct tg3 *tp)
7368 for (i = tp->irq_cnt - 1; i >= 0; i--)
7369 napi_disable(&tp->napi[i].napi);
7372 static void tg3_napi_enable(struct tg3 *tp)
7376 for (i = 0; i < tp->irq_cnt; i++)
7377 napi_enable(&tp->napi[i].napi);
7380 static void tg3_napi_init(struct tg3 *tp)
7384 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7385 for (i = 1; i < tp->irq_cnt; i++)
7386 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7389 static void tg3_napi_fini(struct tg3 *tp)
7393 for (i = 0; i < tp->irq_cnt; i++)
7394 netif_napi_del(&tp->napi[i].napi);
7397 static inline void tg3_netif_stop(struct tg3 *tp)
7399 netif_trans_update(tp->dev); /* prevent tx timeout */
7400 tg3_napi_disable(tp);
7401 netif_carrier_off(tp->dev);
7402 netif_tx_disable(tp->dev);
7405 /* tp->lock must be held */
7406 static inline void tg3_netif_start(struct tg3 *tp)
7410 /* NOTE: unconditional netif_tx_wake_all_queues is only
7411 * appropriate so long as all callers are assured to
7412 * have free tx slots (such as after tg3_init_hw)
7414 netif_tx_wake_all_queues(tp->dev);
7417 netif_carrier_on(tp->dev);
7419 tg3_napi_enable(tp);
7420 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7421 tg3_enable_ints(tp);
7424 static void tg3_irq_quiesce(struct tg3 *tp)
7425 __releases(tp->lock)
7426 __acquires(tp->lock)
7430 BUG_ON(tp->irq_sync);
7435 spin_unlock_bh(&tp->lock);
7437 for (i = 0; i < tp->irq_cnt; i++)
7438 synchronize_irq(tp->napi[i].irq_vec);
7440 spin_lock_bh(&tp->lock);
7443 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7444 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7445 * with as well. Most of the time, this is not necessary except when
7446 * shutting down the device.
7448 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7450 spin_lock_bh(&tp->lock);
7452 tg3_irq_quiesce(tp);
7455 static inline void tg3_full_unlock(struct tg3 *tp)
7457 spin_unlock_bh(&tp->lock);
7460 /* One-shot MSI handler - Chip automatically disables interrupt
7461 * after sending MSI so driver doesn't have to do it.
7463 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7465 struct tg3_napi *tnapi = dev_id;
7466 struct tg3 *tp = tnapi->tp;
7468 prefetch(tnapi->hw_status);
7470 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7472 if (likely(!tg3_irq_sync(tp)))
7473 napi_schedule(&tnapi->napi);
7478 /* MSI ISR - No need to check for interrupt sharing and no need to
7479 * flush status block and interrupt mailbox. PCI ordering rules
7480 * guarantee that MSI will arrive after the status block.
7482 static irqreturn_t tg3_msi(int irq, void *dev_id)
7484 struct tg3_napi *tnapi = dev_id;
7485 struct tg3 *tp = tnapi->tp;
7487 prefetch(tnapi->hw_status);
7489 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7491 * Writing any value to intr-mbox-0 clears PCI INTA# and
7492 * chip-internal interrupt pending events.
7493 * Writing non-zero to intr-mbox-0 additional tells the
7494 * NIC to stop sending us irqs, engaging "in-intr-handler"
7497 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7498 if (likely(!tg3_irq_sync(tp)))
7499 napi_schedule(&tnapi->napi);
7501 return IRQ_RETVAL(1);
7504 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7506 struct tg3_napi *tnapi = dev_id;
7507 struct tg3 *tp = tnapi->tp;
7508 struct tg3_hw_status *sblk = tnapi->hw_status;
7509 unsigned int handled = 1;
7511 /* In INTx mode, it is possible for the interrupt to arrive at
7512 * the CPU before the status block posted prior to the interrupt.
7513 * Reading the PCI State register will confirm whether the
7514 * interrupt is ours and will flush the status block.
7516 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7517 if (tg3_flag(tp, CHIP_RESETTING) ||
7518 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7525 * Writing any value to intr-mbox-0 clears PCI INTA# and
7526 * chip-internal interrupt pending events.
7527 * Writing non-zero to intr-mbox-0 additional tells the
7528 * NIC to stop sending us irqs, engaging "in-intr-handler"
7531 * Flush the mailbox to de-assert the IRQ immediately to prevent
7532 * spurious interrupts. The flush impacts performance but
7533 * excessive spurious interrupts can be worse in some cases.
7535 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7536 if (tg3_irq_sync(tp))
7538 sblk->status &= ~SD_STATUS_UPDATED;
7539 if (likely(tg3_has_work(tnapi))) {
7540 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7541 napi_schedule(&tnapi->napi);
7543 /* No work, shared interrupt perhaps? re-enable
7544 * interrupts, and flush that PCI write
7546 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7550 return IRQ_RETVAL(handled);
7553 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7555 struct tg3_napi *tnapi = dev_id;
7556 struct tg3 *tp = tnapi->tp;
7557 struct tg3_hw_status *sblk = tnapi->hw_status;
7558 unsigned int handled = 1;
7560 /* In INTx mode, it is possible for the interrupt to arrive at
7561 * the CPU before the status block posted prior to the interrupt.
7562 * Reading the PCI State register will confirm whether the
7563 * interrupt is ours and will flush the status block.
7565 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7566 if (tg3_flag(tp, CHIP_RESETTING) ||
7567 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7574 * writing any value to intr-mbox-0 clears PCI INTA# and
7575 * chip-internal interrupt pending events.
7576 * writing non-zero to intr-mbox-0 additional tells the
7577 * NIC to stop sending us irqs, engaging "in-intr-handler"
7580 * Flush the mailbox to de-assert the IRQ immediately to prevent
7581 * spurious interrupts. The flush impacts performance but
7582 * excessive spurious interrupts can be worse in some cases.
7584 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7587 * In a shared interrupt configuration, sometimes other devices'
7588 * interrupts will scream. We record the current status tag here
7589 * so that the above check can report that the screaming interrupts
7590 * are unhandled. Eventually they will be silenced.
7592 tnapi->last_irq_tag = sblk->status_tag;
7594 if (tg3_irq_sync(tp))
7597 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7599 napi_schedule(&tnapi->napi);
7602 return IRQ_RETVAL(handled);
7605 /* ISR for interrupt test */
7606 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7608 struct tg3_napi *tnapi = dev_id;
7609 struct tg3 *tp = tnapi->tp;
7610 struct tg3_hw_status *sblk = tnapi->hw_status;
7612 if ((sblk->status & SD_STATUS_UPDATED) ||
7613 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7614 tg3_disable_ints(tp);
7615 return IRQ_RETVAL(1);
7617 return IRQ_RETVAL(0);
7620 #ifdef CONFIG_NET_POLL_CONTROLLER
7621 static void tg3_poll_controller(struct net_device *dev)
7624 struct tg3 *tp = netdev_priv(dev);
7626 if (tg3_irq_sync(tp))
7629 for (i = 0; i < tp->irq_cnt; i++)
7630 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7634 static void tg3_tx_timeout(struct net_device *dev)
7636 struct tg3 *tp = netdev_priv(dev);
7638 if (netif_msg_tx_err(tp)) {
7639 netdev_err(dev, "transmit timed out, resetting\n");
7643 tg3_reset_task_schedule(tp);
7646 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7647 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7649 u32 base = (u32) mapping & 0xffffffff;
7651 return base + len + 8 < base;
7654 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7655 * of any 4GB boundaries: 4G, 8G, etc
7657 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7660 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7661 u32 base = (u32) mapping & 0xffffffff;
7663 return ((base + len + (mss & 0x3fff)) < base);
7668 /* Test for DMA addresses > 40-bit */
7669 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7672 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7673 if (tg3_flag(tp, 40BIT_DMA_BUG))
7674 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7681 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7682 dma_addr_t mapping, u32 len, u32 flags,
7685 txbd->addr_hi = ((u64) mapping >> 32);
7686 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7687 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7688 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7691 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7692 dma_addr_t map, u32 len, u32 flags,
7695 struct tg3 *tp = tnapi->tp;
7698 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7701 if (tg3_4g_overflow_test(map, len))
7704 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7707 if (tg3_40bit_overflow_test(tp, map, len))
7710 if (tp->dma_limit) {
7711 u32 prvidx = *entry;
7712 u32 tmp_flag = flags & ~TXD_FLAG_END;
7713 while (len > tp->dma_limit && *budget) {
7714 u32 frag_len = tp->dma_limit;
7715 len -= tp->dma_limit;
7717 /* Avoid the 8byte DMA problem */
7719 len += tp->dma_limit / 2;
7720 frag_len = tp->dma_limit / 2;
7723 tnapi->tx_buffers[*entry].fragmented = true;
7725 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7726 frag_len, tmp_flag, mss, vlan);
7729 *entry = NEXT_TX(*entry);
7736 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7737 len, flags, mss, vlan);
7739 *entry = NEXT_TX(*entry);
7742 tnapi->tx_buffers[prvidx].fragmented = false;
7746 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7747 len, flags, mss, vlan);
7748 *entry = NEXT_TX(*entry);
7754 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7757 struct sk_buff *skb;
7758 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7763 pci_unmap_single(tnapi->tp->pdev,
7764 dma_unmap_addr(txb, mapping),
7768 while (txb->fragmented) {
7769 txb->fragmented = false;
7770 entry = NEXT_TX(entry);
7771 txb = &tnapi->tx_buffers[entry];
7774 for (i = 0; i <= last; i++) {
7775 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7777 entry = NEXT_TX(entry);
7778 txb = &tnapi->tx_buffers[entry];
7780 pci_unmap_page(tnapi->tp->pdev,
7781 dma_unmap_addr(txb, mapping),
7782 skb_frag_size(frag), PCI_DMA_TODEVICE);
7784 while (txb->fragmented) {
7785 txb->fragmented = false;
7786 entry = NEXT_TX(entry);
7787 txb = &tnapi->tx_buffers[entry];
7792 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7793 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7794 struct sk_buff **pskb,
7795 u32 *entry, u32 *budget,
7796 u32 base_flags, u32 mss, u32 vlan)
7798 struct tg3 *tp = tnapi->tp;
7799 struct sk_buff *new_skb, *skb = *pskb;
7800 dma_addr_t new_addr = 0;
7803 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7804 new_skb = skb_copy(skb, GFP_ATOMIC);
7806 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7808 new_skb = skb_copy_expand(skb,
7809 skb_headroom(skb) + more_headroom,
7810 skb_tailroom(skb), GFP_ATOMIC);
7816 /* New SKB is guaranteed to be linear. */
7817 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7819 /* Make sure the mapping succeeded */
7820 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7821 dev_kfree_skb_any(new_skb);
7824 u32 save_entry = *entry;
7826 base_flags |= TXD_FLAG_END;
7828 tnapi->tx_buffers[*entry].skb = new_skb;
7829 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7832 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7833 new_skb->len, base_flags,
7835 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7836 dev_kfree_skb_any(new_skb);
7842 dev_consume_skb_any(skb);
7847 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7849 /* Check if we will never have enough descriptors,
7850 * as gso_segs can be more than current ring size
7852 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7855 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7857 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7858 * indicated in tg3_tx_frag_set()
7860 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7861 struct netdev_queue *txq, struct sk_buff *skb)
7863 struct sk_buff *segs, *nskb;
7864 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7866 /* Estimate the number of fragments in the worst case */
7867 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7868 netif_tx_stop_queue(txq);
7870 /* netif_tx_stop_queue() must be done before checking
7871 * checking tx index in tg3_tx_avail() below, because in
7872 * tg3_tx(), we update tx index before checking for
7873 * netif_tx_queue_stopped().
7876 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7877 return NETDEV_TX_BUSY;
7879 netif_tx_wake_queue(txq);
7882 segs = skb_gso_segment(skb, tp->dev->features &
7883 ~(NETIF_F_TSO | NETIF_F_TSO6));
7884 if (IS_ERR(segs) || !segs)
7885 goto tg3_tso_bug_end;
7891 tg3_start_xmit(nskb, tp->dev);
7895 dev_consume_skb_any(skb);
7897 return NETDEV_TX_OK;
7900 /* hard_start_xmit for all devices */
7901 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7903 struct tg3 *tp = netdev_priv(dev);
7904 u32 len, entry, base_flags, mss, vlan = 0;
7906 int i = -1, would_hit_hwbug;
7908 struct tg3_napi *tnapi;
7909 struct netdev_queue *txq;
7911 struct iphdr *iph = NULL;
7912 struct tcphdr *tcph = NULL;
7913 __sum16 tcp_csum = 0, ip_csum = 0;
7914 __be16 ip_tot_len = 0;
7916 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7917 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7918 if (tg3_flag(tp, ENABLE_TSS))
7921 budget = tg3_tx_avail(tnapi);
7923 /* We are running in BH disabled context with netif_tx_lock
7924 * and TX reclaim runs via tp->napi.poll inside of a software
7925 * interrupt. Furthermore, IRQ processing runs lockless so we have
7926 * no IRQ context deadlocks to worry about either. Rejoice!
7928 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7929 if (!netif_tx_queue_stopped(txq)) {
7930 netif_tx_stop_queue(txq);
7932 /* This is a hard error, log it. */
7934 "BUG! Tx Ring full when queue awake!\n");
7936 return NETDEV_TX_BUSY;
7939 entry = tnapi->tx_prod;
7942 mss = skb_shinfo(skb)->gso_size;
7944 u32 tcp_opt_len, hdr_len;
7946 if (skb_cow_head(skb, 0))
7950 tcp_opt_len = tcp_optlen(skb);
7952 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7954 /* HW/FW can not correctly segment packets that have been
7955 * vlan encapsulated.
7957 if (skb->protocol == htons(ETH_P_8021Q) ||
7958 skb->protocol == htons(ETH_P_8021AD)) {
7959 if (tg3_tso_bug_gso_check(tnapi, skb))
7960 return tg3_tso_bug(tp, tnapi, txq, skb);
7964 if (!skb_is_gso_v6(skb)) {
7965 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7966 tg3_flag(tp, TSO_BUG)) {
7967 if (tg3_tso_bug_gso_check(tnapi, skb))
7968 return tg3_tso_bug(tp, tnapi, txq, skb);
7971 ip_csum = iph->check;
7972 ip_tot_len = iph->tot_len;
7974 iph->tot_len = htons(mss + hdr_len);
7977 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7978 TXD_FLAG_CPU_POST_DMA);
7980 tcph = tcp_hdr(skb);
7981 tcp_csum = tcph->check;
7983 if (tg3_flag(tp, HW_TSO_1) ||
7984 tg3_flag(tp, HW_TSO_2) ||
7985 tg3_flag(tp, HW_TSO_3)) {
7987 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7989 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7993 if (tg3_flag(tp, HW_TSO_3)) {
7994 mss |= (hdr_len & 0xc) << 12;
7996 base_flags |= 0x00000010;
7997 base_flags |= (hdr_len & 0x3e0) << 5;
7998 } else if (tg3_flag(tp, HW_TSO_2))
7999 mss |= hdr_len << 9;
8000 else if (tg3_flag(tp, HW_TSO_1) ||
8001 tg3_asic_rev(tp) == ASIC_REV_5705) {
8002 if (tcp_opt_len || iph->ihl > 5) {
8005 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8006 mss |= (tsflags << 11);
8009 if (tcp_opt_len || iph->ihl > 5) {
8012 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8013 base_flags |= tsflags << 12;
8016 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8017 /* HW/FW can not correctly checksum packets that have been
8018 * vlan encapsulated.
8020 if (skb->protocol == htons(ETH_P_8021Q) ||
8021 skb->protocol == htons(ETH_P_8021AD)) {
8022 if (skb_checksum_help(skb))
8025 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8029 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8030 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8031 base_flags |= TXD_FLAG_JMB_PKT;
8033 if (skb_vlan_tag_present(skb)) {
8034 base_flags |= TXD_FLAG_VLAN;
8035 vlan = skb_vlan_tag_get(skb);
8038 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8039 tg3_flag(tp, TX_TSTAMP_EN)) {
8040 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8041 base_flags |= TXD_FLAG_HWTSTAMP;
8044 len = skb_headlen(skb);
8046 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8047 if (pci_dma_mapping_error(tp->pdev, mapping))
8051 tnapi->tx_buffers[entry].skb = skb;
8052 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8054 would_hit_hwbug = 0;
8056 if (tg3_flag(tp, 5701_DMA_BUG))
8057 would_hit_hwbug = 1;
8059 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8060 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8062 would_hit_hwbug = 1;
8063 } else if (skb_shinfo(skb)->nr_frags > 0) {
8066 if (!tg3_flag(tp, HW_TSO_1) &&
8067 !tg3_flag(tp, HW_TSO_2) &&
8068 !tg3_flag(tp, HW_TSO_3))
8071 /* Now loop through additional data
8072 * fragments, and queue them.
8074 last = skb_shinfo(skb)->nr_frags - 1;
8075 for (i = 0; i <= last; i++) {
8076 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8078 len = skb_frag_size(frag);
8079 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8080 len, DMA_TO_DEVICE);
8082 tnapi->tx_buffers[entry].skb = NULL;
8083 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8085 if (dma_mapping_error(&tp->pdev->dev, mapping))
8089 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8091 ((i == last) ? TXD_FLAG_END : 0),
8093 would_hit_hwbug = 1;
8099 if (would_hit_hwbug) {
8100 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8102 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8103 /* If it's a TSO packet, do GSO instead of
8104 * allocating and copying to a large linear SKB
8107 iph->check = ip_csum;
8108 iph->tot_len = ip_tot_len;
8110 tcph->check = tcp_csum;
8111 return tg3_tso_bug(tp, tnapi, txq, skb);
8114 /* If the workaround fails due to memory/mapping
8115 * failure, silently drop this packet.
8117 entry = tnapi->tx_prod;
8118 budget = tg3_tx_avail(tnapi);
8119 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8120 base_flags, mss, vlan))
8124 skb_tx_timestamp(skb);
8125 netdev_tx_sent_queue(txq, skb->len);
8127 /* Sync BD data before updating mailbox */
8130 tnapi->tx_prod = entry;
8131 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8132 netif_tx_stop_queue(txq);
8134 /* netif_tx_stop_queue() must be done before checking
8135 * checking tx index in tg3_tx_avail() below, because in
8136 * tg3_tx(), we update tx index before checking for
8137 * netif_tx_queue_stopped().
8140 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8141 netif_tx_wake_queue(txq);
8144 if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8145 /* Packets are ready, update Tx producer idx on card. */
8146 tw32_tx_mbox(tnapi->prodmbox, entry);
8150 return NETDEV_TX_OK;
8153 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8154 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8156 dev_kfree_skb_any(skb);
8159 return NETDEV_TX_OK;
8162 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8165 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8166 MAC_MODE_PORT_MODE_MASK);
8168 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8170 if (!tg3_flag(tp, 5705_PLUS))
8171 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8173 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8174 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8176 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8178 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8180 if (tg3_flag(tp, 5705_PLUS) ||
8181 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8182 tg3_asic_rev(tp) == ASIC_REV_5700)
8183 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8186 tw32(MAC_MODE, tp->mac_mode);
8190 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8192 u32 val, bmcr, mac_mode, ptest = 0;
8194 tg3_phy_toggle_apd(tp, false);
8195 tg3_phy_toggle_automdix(tp, false);
8197 if (extlpbk && tg3_phy_set_extloopbk(tp))
8200 bmcr = BMCR_FULLDPLX;
8205 bmcr |= BMCR_SPEED100;
8209 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8211 bmcr |= BMCR_SPEED100;
8214 bmcr |= BMCR_SPEED1000;
8219 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8220 tg3_readphy(tp, MII_CTRL1000, &val);
8221 val |= CTL1000_AS_MASTER |
8222 CTL1000_ENABLE_MASTER;
8223 tg3_writephy(tp, MII_CTRL1000, val);
8225 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8226 MII_TG3_FET_PTEST_TRIM_2;
8227 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8230 bmcr |= BMCR_LOOPBACK;
8232 tg3_writephy(tp, MII_BMCR, bmcr);
8234 /* The write needs to be flushed for the FETs */
8235 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8236 tg3_readphy(tp, MII_BMCR, &bmcr);
8240 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8241 tg3_asic_rev(tp) == ASIC_REV_5785) {
8242 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8243 MII_TG3_FET_PTEST_FRC_TX_LINK |
8244 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8246 /* The write needs to be flushed for the AC131 */
8247 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8250 /* Reset to prevent losing 1st rx packet intermittently */
8251 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8252 tg3_flag(tp, 5780_CLASS)) {
8253 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8255 tw32_f(MAC_RX_MODE, tp->rx_mode);
8258 mac_mode = tp->mac_mode &
8259 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8260 if (speed == SPEED_1000)
8261 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8263 mac_mode |= MAC_MODE_PORT_MODE_MII;
8265 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8266 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8268 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8269 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8270 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8271 mac_mode |= MAC_MODE_LINK_POLARITY;
8273 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8274 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8277 tw32(MAC_MODE, mac_mode);
8283 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8285 struct tg3 *tp = netdev_priv(dev);
8287 if (features & NETIF_F_LOOPBACK) {
8288 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8291 spin_lock_bh(&tp->lock);
8292 tg3_mac_loopback(tp, true);
8293 netif_carrier_on(tp->dev);
8294 spin_unlock_bh(&tp->lock);
8295 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8297 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8300 spin_lock_bh(&tp->lock);
8301 tg3_mac_loopback(tp, false);
8302 /* Force link status check */
8303 tg3_setup_phy(tp, true);
8304 spin_unlock_bh(&tp->lock);
8305 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8309 static netdev_features_t tg3_fix_features(struct net_device *dev,
8310 netdev_features_t features)
8312 struct tg3 *tp = netdev_priv(dev);
8314 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8315 features &= ~NETIF_F_ALL_TSO;
8320 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8322 netdev_features_t changed = dev->features ^ features;
8324 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8325 tg3_set_loopback(dev, features);
8330 static void tg3_rx_prodring_free(struct tg3 *tp,
8331 struct tg3_rx_prodring_set *tpr)
8335 if (tpr != &tp->napi[0].prodring) {
8336 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8337 i = (i + 1) & tp->rx_std_ring_mask)
8338 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8341 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8342 for (i = tpr->rx_jmb_cons_idx;
8343 i != tpr->rx_jmb_prod_idx;
8344 i = (i + 1) & tp->rx_jmb_ring_mask) {
8345 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8353 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8354 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8357 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8358 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8359 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8364 /* Initialize rx rings for packet processing.
8366 * The chip has been shut down and the driver detached from
8367 * the networking, so no interrupts or new tx packets will
8368 * end up in the driver. tp->{tx,}lock are held and thus
8371 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8372 struct tg3_rx_prodring_set *tpr)
8374 u32 i, rx_pkt_dma_sz;
8376 tpr->rx_std_cons_idx = 0;
8377 tpr->rx_std_prod_idx = 0;
8378 tpr->rx_jmb_cons_idx = 0;
8379 tpr->rx_jmb_prod_idx = 0;
8381 if (tpr != &tp->napi[0].prodring) {
8382 memset(&tpr->rx_std_buffers[0], 0,
8383 TG3_RX_STD_BUFF_RING_SIZE(tp));
8384 if (tpr->rx_jmb_buffers)
8385 memset(&tpr->rx_jmb_buffers[0], 0,
8386 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8390 /* Zero out all descriptors. */
8391 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8393 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8394 if (tg3_flag(tp, 5780_CLASS) &&
8395 tp->dev->mtu > ETH_DATA_LEN)
8396 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8397 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8399 /* Initialize invariants of the rings, we only set this
8400 * stuff once. This works because the card does not
8401 * write into the rx buffer posting rings.
8403 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8404 struct tg3_rx_buffer_desc *rxd;
8406 rxd = &tpr->rx_std[i];
8407 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8408 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8409 rxd->opaque = (RXD_OPAQUE_RING_STD |
8410 (i << RXD_OPAQUE_INDEX_SHIFT));
8413 /* Now allocate fresh SKBs for each rx ring. */
8414 for (i = 0; i < tp->rx_pending; i++) {
8415 unsigned int frag_size;
8417 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8419 netdev_warn(tp->dev,
8420 "Using a smaller RX standard ring. Only "
8421 "%d out of %d buffers were allocated "
8422 "successfully\n", i, tp->rx_pending);
8430 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8433 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8435 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8438 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8439 struct tg3_rx_buffer_desc *rxd;
8441 rxd = &tpr->rx_jmb[i].std;
8442 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8443 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8445 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8446 (i << RXD_OPAQUE_INDEX_SHIFT));
8449 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8450 unsigned int frag_size;
8452 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8454 netdev_warn(tp->dev,
8455 "Using a smaller RX jumbo ring. Only %d "
8456 "out of %d buffers were allocated "
8457 "successfully\n", i, tp->rx_jumbo_pending);
8460 tp->rx_jumbo_pending = i;
8469 tg3_rx_prodring_free(tp, tpr);
8473 static void tg3_rx_prodring_fini(struct tg3 *tp,
8474 struct tg3_rx_prodring_set *tpr)
8476 kfree(tpr->rx_std_buffers);
8477 tpr->rx_std_buffers = NULL;
8478 kfree(tpr->rx_jmb_buffers);
8479 tpr->rx_jmb_buffers = NULL;
8481 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8482 tpr->rx_std, tpr->rx_std_mapping);
8486 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8487 tpr->rx_jmb, tpr->rx_jmb_mapping);
8492 static int tg3_rx_prodring_init(struct tg3 *tp,
8493 struct tg3_rx_prodring_set *tpr)
8495 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8497 if (!tpr->rx_std_buffers)
8500 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8501 TG3_RX_STD_RING_BYTES(tp),
8502 &tpr->rx_std_mapping,
8507 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8508 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8510 if (!tpr->rx_jmb_buffers)
8513 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8514 TG3_RX_JMB_RING_BYTES(tp),
8515 &tpr->rx_jmb_mapping,
8524 tg3_rx_prodring_fini(tp, tpr);
8528 /* Free up pending packets in all rx/tx rings.
8530 * The chip has been shut down and the driver detached from
8531 * the networking, so no interrupts or new tx packets will
8532 * end up in the driver. tp->{tx,}lock is not held and we are not
8533 * in an interrupt context and thus may sleep.
8535 static void tg3_free_rings(struct tg3 *tp)
8539 for (j = 0; j < tp->irq_cnt; j++) {
8540 struct tg3_napi *tnapi = &tp->napi[j];
8542 tg3_rx_prodring_free(tp, &tnapi->prodring);
8544 if (!tnapi->tx_buffers)
8547 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8548 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8553 tg3_tx_skb_unmap(tnapi, i,
8554 skb_shinfo(skb)->nr_frags - 1);
8556 dev_consume_skb_any(skb);
8558 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8562 /* Initialize tx/rx rings for packet processing.
8564 * The chip has been shut down and the driver detached from
8565 * the networking, so no interrupts or new tx packets will
8566 * end up in the driver. tp->{tx,}lock are held and thus
8569 static int tg3_init_rings(struct tg3 *tp)
8573 /* Free up all the SKBs. */
8576 for (i = 0; i < tp->irq_cnt; i++) {
8577 struct tg3_napi *tnapi = &tp->napi[i];
8579 tnapi->last_tag = 0;
8580 tnapi->last_irq_tag = 0;
8581 tnapi->hw_status->status = 0;
8582 tnapi->hw_status->status_tag = 0;
8583 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8588 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8590 tnapi->rx_rcb_ptr = 0;
8592 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8594 if (tnapi->prodring.rx_std &&
8595 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8604 static void tg3_mem_tx_release(struct tg3 *tp)
8608 for (i = 0; i < tp->irq_max; i++) {
8609 struct tg3_napi *tnapi = &tp->napi[i];
8611 if (tnapi->tx_ring) {
8612 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8613 tnapi->tx_ring, tnapi->tx_desc_mapping);
8614 tnapi->tx_ring = NULL;
8617 kfree(tnapi->tx_buffers);
8618 tnapi->tx_buffers = NULL;
8622 static int tg3_mem_tx_acquire(struct tg3 *tp)
8625 struct tg3_napi *tnapi = &tp->napi[0];
8627 /* If multivector TSS is enabled, vector 0 does not handle
8628 * tx interrupts. Don't allocate any resources for it.
8630 if (tg3_flag(tp, ENABLE_TSS))
8633 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8634 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8635 TG3_TX_RING_SIZE, GFP_KERNEL);
8636 if (!tnapi->tx_buffers)
8639 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8641 &tnapi->tx_desc_mapping,
8643 if (!tnapi->tx_ring)
8650 tg3_mem_tx_release(tp);
8654 static void tg3_mem_rx_release(struct tg3 *tp)
8658 for (i = 0; i < tp->irq_max; i++) {
8659 struct tg3_napi *tnapi = &tp->napi[i];
8661 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8666 dma_free_coherent(&tp->pdev->dev,
8667 TG3_RX_RCB_RING_BYTES(tp),
8669 tnapi->rx_rcb_mapping);
8670 tnapi->rx_rcb = NULL;
8674 static int tg3_mem_rx_acquire(struct tg3 *tp)
8676 unsigned int i, limit;
8678 limit = tp->rxq_cnt;
8680 /* If RSS is enabled, we need a (dummy) producer ring
8681 * set on vector zero. This is the true hw prodring.
8683 if (tg3_flag(tp, ENABLE_RSS))
8686 for (i = 0; i < limit; i++) {
8687 struct tg3_napi *tnapi = &tp->napi[i];
8689 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8692 /* If multivector RSS is enabled, vector 0
8693 * does not handle rx or tx interrupts.
8694 * Don't allocate any resources for it.
8696 if (!i && tg3_flag(tp, ENABLE_RSS))
8699 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8700 TG3_RX_RCB_RING_BYTES(tp),
8701 &tnapi->rx_rcb_mapping,
8710 tg3_mem_rx_release(tp);
8715 * Must not be invoked with interrupt sources disabled and
8716 * the hardware shutdown down.
8718 static void tg3_free_consistent(struct tg3 *tp)
8722 for (i = 0; i < tp->irq_cnt; i++) {
8723 struct tg3_napi *tnapi = &tp->napi[i];
8725 if (tnapi->hw_status) {
8726 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8728 tnapi->status_mapping);
8729 tnapi->hw_status = NULL;
8733 tg3_mem_rx_release(tp);
8734 tg3_mem_tx_release(tp);
8736 /* tp->hw_stats can be referenced safely:
8737 * 1. under rtnl_lock
8738 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8741 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8742 tp->hw_stats, tp->stats_mapping);
8743 tp->hw_stats = NULL;
8748 * Must not be invoked with interrupt sources disabled and
8749 * the hardware shutdown down. Can sleep.
8751 static int tg3_alloc_consistent(struct tg3 *tp)
8755 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8756 sizeof(struct tg3_hw_stats),
8757 &tp->stats_mapping, GFP_KERNEL);
8761 for (i = 0; i < tp->irq_cnt; i++) {
8762 struct tg3_napi *tnapi = &tp->napi[i];
8763 struct tg3_hw_status *sblk;
8765 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8767 &tnapi->status_mapping,
8769 if (!tnapi->hw_status)
8772 sblk = tnapi->hw_status;
8774 if (tg3_flag(tp, ENABLE_RSS)) {
8775 u16 *prodptr = NULL;
8778 * When RSS is enabled, the status block format changes
8779 * slightly. The "rx_jumbo_consumer", "reserved",
8780 * and "rx_mini_consumer" members get mapped to the
8781 * other three rx return ring producer indexes.
8785 prodptr = &sblk->idx[0].rx_producer;
8788 prodptr = &sblk->rx_jumbo_consumer;
8791 prodptr = &sblk->reserved;
8794 prodptr = &sblk->rx_mini_consumer;
8797 tnapi->rx_rcb_prod_idx = prodptr;
8799 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8803 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8809 tg3_free_consistent(tp);
8813 #define MAX_WAIT_CNT 1000
8815 /* To stop a block, clear the enable bit and poll till it
8816 * clears. tp->lock is held.
8818 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8823 if (tg3_flag(tp, 5705_PLUS)) {
8830 /* We can't enable/disable these bits of the
8831 * 5705/5750, just say success.
8844 for (i = 0; i < MAX_WAIT_CNT; i++) {
8845 if (pci_channel_offline(tp->pdev)) {
8846 dev_err(&tp->pdev->dev,
8847 "tg3_stop_block device offline, "
8848 "ofs=%lx enable_bit=%x\n",
8855 if ((val & enable_bit) == 0)
8859 if (i == MAX_WAIT_CNT && !silent) {
8860 dev_err(&tp->pdev->dev,
8861 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8869 /* tp->lock is held. */
8870 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8874 tg3_disable_ints(tp);
8876 if (pci_channel_offline(tp->pdev)) {
8877 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8878 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8883 tp->rx_mode &= ~RX_MODE_ENABLE;
8884 tw32_f(MAC_RX_MODE, tp->rx_mode);
8887 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8888 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8889 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8890 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8891 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8892 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8894 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8895 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8896 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8897 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8898 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8899 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8900 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8902 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8903 tw32_f(MAC_MODE, tp->mac_mode);
8906 tp->tx_mode &= ~TX_MODE_ENABLE;
8907 tw32_f(MAC_TX_MODE, tp->tx_mode);
8909 for (i = 0; i < MAX_WAIT_CNT; i++) {
8911 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8914 if (i >= MAX_WAIT_CNT) {
8915 dev_err(&tp->pdev->dev,
8916 "%s timed out, TX_MODE_ENABLE will not clear "
8917 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8921 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8922 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8923 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8925 tw32(FTQ_RESET, 0xffffffff);
8926 tw32(FTQ_RESET, 0x00000000);
8928 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8929 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8932 for (i = 0; i < tp->irq_cnt; i++) {
8933 struct tg3_napi *tnapi = &tp->napi[i];
8934 if (tnapi->hw_status)
8935 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8941 /* Save PCI command register before chip reset */
8942 static void tg3_save_pci_state(struct tg3 *tp)
8944 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8947 /* Restore PCI state after chip reset */
8948 static void tg3_restore_pci_state(struct tg3 *tp)
8952 /* Re-enable indirect register accesses. */
8953 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8954 tp->misc_host_ctrl);
8956 /* Set MAX PCI retry to zero. */
8957 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8958 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8959 tg3_flag(tp, PCIX_MODE))
8960 val |= PCISTATE_RETRY_SAME_DMA;
8961 /* Allow reads and writes to the APE register and memory space. */
8962 if (tg3_flag(tp, ENABLE_APE))
8963 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8964 PCISTATE_ALLOW_APE_SHMEM_WR |
8965 PCISTATE_ALLOW_APE_PSPACE_WR;
8966 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8968 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8970 if (!tg3_flag(tp, PCI_EXPRESS)) {
8971 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8972 tp->pci_cacheline_sz);
8973 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8977 /* Make sure PCI-X relaxed ordering bit is clear. */
8978 if (tg3_flag(tp, PCIX_MODE)) {
8981 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8983 pcix_cmd &= ~PCI_X_CMD_ERO;
8984 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8988 if (tg3_flag(tp, 5780_CLASS)) {
8990 /* Chip reset on 5780 will reset MSI enable bit,
8991 * so need to restore it.
8993 if (tg3_flag(tp, USING_MSI)) {
8996 pci_read_config_word(tp->pdev,
8997 tp->msi_cap + PCI_MSI_FLAGS,
8999 pci_write_config_word(tp->pdev,
9000 tp->msi_cap + PCI_MSI_FLAGS,
9001 ctrl | PCI_MSI_FLAGS_ENABLE);
9002 val = tr32(MSGINT_MODE);
9003 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9008 static void tg3_override_clk(struct tg3 *tp)
9012 switch (tg3_asic_rev(tp)) {
9014 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9015 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9016 TG3_CPMU_MAC_ORIDE_ENABLE);
9021 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9029 static void tg3_restore_clk(struct tg3 *tp)
9033 switch (tg3_asic_rev(tp)) {
9035 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9036 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9037 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9042 val = tr32(TG3_CPMU_CLCK_ORIDE);
9043 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9051 /* tp->lock is held. */
9052 static int tg3_chip_reset(struct tg3 *tp)
9053 __releases(tp->lock)
9054 __acquires(tp->lock)
9057 void (*write_op)(struct tg3 *, u32, u32);
9060 if (!pci_device_is_present(tp->pdev))
9065 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9067 /* No matching tg3_nvram_unlock() after this because
9068 * chip reset below will undo the nvram lock.
9070 tp->nvram_lock_cnt = 0;
9072 /* GRC_MISC_CFG core clock reset will clear the memory
9073 * enable bit in PCI register 4 and the MSI enable bit
9074 * on some chips, so we save relevant registers here.
9076 tg3_save_pci_state(tp);
9078 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9079 tg3_flag(tp, 5755_PLUS))
9080 tw32(GRC_FASTBOOT_PC, 0);
9083 * We must avoid the readl() that normally takes place.
9084 * It locks machines, causes machine checks, and other
9085 * fun things. So, temporarily disable the 5701
9086 * hardware workaround, while we do the reset.
9088 write_op = tp->write32;
9089 if (write_op == tg3_write_flush_reg32)
9090 tp->write32 = tg3_write32;
9092 /* Prevent the irq handler from reading or writing PCI registers
9093 * during chip reset when the memory enable bit in the PCI command
9094 * register may be cleared. The chip does not generate interrupt
9095 * at this time, but the irq handler may still be called due to irq
9096 * sharing or irqpoll.
9098 tg3_flag_set(tp, CHIP_RESETTING);
9099 for (i = 0; i < tp->irq_cnt; i++) {
9100 struct tg3_napi *tnapi = &tp->napi[i];
9101 if (tnapi->hw_status) {
9102 tnapi->hw_status->status = 0;
9103 tnapi->hw_status->status_tag = 0;
9105 tnapi->last_tag = 0;
9106 tnapi->last_irq_tag = 0;
9110 tg3_full_unlock(tp);
9112 for (i = 0; i < tp->irq_cnt; i++)
9113 synchronize_irq(tp->napi[i].irq_vec);
9115 tg3_full_lock(tp, 0);
9117 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9118 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9119 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9123 val = GRC_MISC_CFG_CORECLK_RESET;
9125 if (tg3_flag(tp, PCI_EXPRESS)) {
9126 /* Force PCIe 1.0a mode */
9127 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9128 !tg3_flag(tp, 57765_PLUS) &&
9129 tr32(TG3_PCIE_PHY_TSTCTL) ==
9130 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9131 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9133 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9134 tw32(GRC_MISC_CFG, (1 << 29));
9139 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9140 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9141 tw32(GRC_VCPU_EXT_CTRL,
9142 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9145 /* Set the clock to the highest frequency to avoid timeouts. With link
9146 * aware mode, the clock speed could be slow and bootcode does not
9147 * complete within the expected time. Override the clock to allow the
9148 * bootcode to finish sooner and then restore it.
9150 tg3_override_clk(tp);
9152 /* Manage gphy power for all CPMU absent PCIe devices. */
9153 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9154 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9156 tw32(GRC_MISC_CFG, val);
9158 /* restore 5701 hardware bug workaround write method */
9159 tp->write32 = write_op;
9161 /* Unfortunately, we have to delay before the PCI read back.
9162 * Some 575X chips even will not respond to a PCI cfg access
9163 * when the reset command is given to the chip.
9165 * How do these hardware designers expect things to work
9166 * properly if the PCI write is posted for a long period
9167 * of time? It is always necessary to have some method by
9168 * which a register read back can occur to push the write
9169 * out which does the reset.
9171 * For most tg3 variants the trick below was working.
9176 /* Flush PCI posted writes. The normal MMIO registers
9177 * are inaccessible at this time so this is the only
9178 * way to make this reliably (actually, this is no longer
9179 * the case, see above). I tried to use indirect
9180 * register read/write but this upset some 5701 variants.
9182 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9186 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9189 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9193 /* Wait for link training to complete. */
9194 for (j = 0; j < 5000; j++)
9197 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9198 pci_write_config_dword(tp->pdev, 0xc4,
9199 cfg_val | (1 << 15));
9202 /* Clear the "no snoop" and "relaxed ordering" bits. */
9203 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9205 * Older PCIe devices only support the 128 byte
9206 * MPS setting. Enforce the restriction.
9208 if (!tg3_flag(tp, CPMU_PRESENT))
9209 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9210 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9212 /* Clear error status */
9213 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9214 PCI_EXP_DEVSTA_CED |
9215 PCI_EXP_DEVSTA_NFED |
9216 PCI_EXP_DEVSTA_FED |
9217 PCI_EXP_DEVSTA_URD);
9220 tg3_restore_pci_state(tp);
9222 tg3_flag_clear(tp, CHIP_RESETTING);
9223 tg3_flag_clear(tp, ERROR_PROCESSED);
9226 if (tg3_flag(tp, 5780_CLASS))
9227 val = tr32(MEMARB_MODE);
9228 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9230 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9232 tw32(0x5000, 0x400);
9235 if (tg3_flag(tp, IS_SSB_CORE)) {
9237 * BCM4785: In order to avoid repercussions from using
9238 * potentially defective internal ROM, stop the Rx RISC CPU,
9239 * which is not required.
9242 tg3_halt_cpu(tp, RX_CPU_BASE);
9245 err = tg3_poll_fw(tp);
9249 tw32(GRC_MODE, tp->grc_mode);
9251 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9254 tw32(0xc4, val | (1 << 15));
9257 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9258 tg3_asic_rev(tp) == ASIC_REV_5705) {
9259 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9260 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9261 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9262 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9265 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9266 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9268 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9269 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9274 tw32_f(MAC_MODE, val);
9277 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9281 if (tg3_flag(tp, PCI_EXPRESS) &&
9282 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9283 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9284 !tg3_flag(tp, 57765_PLUS)) {
9287 tw32(0x7c00, val | (1 << 25));
9290 tg3_restore_clk(tp);
9292 /* Reprobe ASF enable state. */
9293 tg3_flag_clear(tp, ENABLE_ASF);
9294 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9295 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9297 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9298 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9299 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9302 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9303 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9304 tg3_flag_set(tp, ENABLE_ASF);
9305 tp->last_event_jiffies = jiffies;
9306 if (tg3_flag(tp, 5750_PLUS))
9307 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9309 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9310 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9311 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9312 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9313 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9320 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9321 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9322 static void __tg3_set_rx_mode(struct net_device *);
9324 /* tp->lock is held. */
9325 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9331 tg3_write_sig_pre_reset(tp, kind);
9333 tg3_abort_hw(tp, silent);
9334 err = tg3_chip_reset(tp);
9336 __tg3_set_mac_addr(tp, false);
9338 tg3_write_sig_legacy(tp, kind);
9339 tg3_write_sig_post_reset(tp, kind);
9342 /* Save the stats across chip resets... */
9343 tg3_get_nstats(tp, &tp->net_stats_prev);
9344 tg3_get_estats(tp, &tp->estats_prev);
9346 /* And make sure the next sample is new data */
9347 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9353 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9355 struct tg3 *tp = netdev_priv(dev);
9356 struct sockaddr *addr = p;
9358 bool skip_mac_1 = false;
9360 if (!is_valid_ether_addr(addr->sa_data))
9361 return -EADDRNOTAVAIL;
9363 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9365 if (!netif_running(dev))
9368 if (tg3_flag(tp, ENABLE_ASF)) {
9369 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9371 addr0_high = tr32(MAC_ADDR_0_HIGH);
9372 addr0_low = tr32(MAC_ADDR_0_LOW);
9373 addr1_high = tr32(MAC_ADDR_1_HIGH);
9374 addr1_low = tr32(MAC_ADDR_1_LOW);
9376 /* Skip MAC addr 1 if ASF is using it. */
9377 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9378 !(addr1_high == 0 && addr1_low == 0))
9381 spin_lock_bh(&tp->lock);
9382 __tg3_set_mac_addr(tp, skip_mac_1);
9383 __tg3_set_rx_mode(dev);
9384 spin_unlock_bh(&tp->lock);
9389 /* tp->lock is held. */
9390 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9391 dma_addr_t mapping, u32 maxlen_flags,
9395 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9396 ((u64) mapping >> 32));
9398 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9399 ((u64) mapping & 0xffffffff));
9401 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9404 if (!tg3_flag(tp, 5705_PLUS))
9406 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9411 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9415 if (!tg3_flag(tp, ENABLE_TSS)) {
9416 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9417 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9418 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9420 tw32(HOSTCC_TXCOL_TICKS, 0);
9421 tw32(HOSTCC_TXMAX_FRAMES, 0);
9422 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9424 for (; i < tp->txq_cnt; i++) {
9427 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9428 tw32(reg, ec->tx_coalesce_usecs);
9429 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9430 tw32(reg, ec->tx_max_coalesced_frames);
9431 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9432 tw32(reg, ec->tx_max_coalesced_frames_irq);
9436 for (; i < tp->irq_max - 1; i++) {
9437 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9438 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9439 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9443 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9446 u32 limit = tp->rxq_cnt;
9448 if (!tg3_flag(tp, ENABLE_RSS)) {
9449 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9450 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9451 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9454 tw32(HOSTCC_RXCOL_TICKS, 0);
9455 tw32(HOSTCC_RXMAX_FRAMES, 0);
9456 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9459 for (; i < limit; i++) {
9462 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9463 tw32(reg, ec->rx_coalesce_usecs);
9464 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9465 tw32(reg, ec->rx_max_coalesced_frames);
9466 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9467 tw32(reg, ec->rx_max_coalesced_frames_irq);
9470 for (; i < tp->irq_max - 1; i++) {
9471 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9472 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9473 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9477 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9479 tg3_coal_tx_init(tp, ec);
9480 tg3_coal_rx_init(tp, ec);
9482 if (!tg3_flag(tp, 5705_PLUS)) {
9483 u32 val = ec->stats_block_coalesce_usecs;
9485 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9486 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9491 tw32(HOSTCC_STAT_COAL_TICKS, val);
9495 /* tp->lock is held. */
9496 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9500 /* Disable all transmit rings but the first. */
9501 if (!tg3_flag(tp, 5705_PLUS))
9502 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9503 else if (tg3_flag(tp, 5717_PLUS))
9504 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9505 else if (tg3_flag(tp, 57765_CLASS) ||
9506 tg3_asic_rev(tp) == ASIC_REV_5762)
9507 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9509 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9511 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9512 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9513 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9514 BDINFO_FLAGS_DISABLED);
9517 /* tp->lock is held. */
9518 static void tg3_tx_rcbs_init(struct tg3 *tp)
9521 u32 txrcb = NIC_SRAM_SEND_RCB;
9523 if (tg3_flag(tp, ENABLE_TSS))
9526 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9527 struct tg3_napi *tnapi = &tp->napi[i];
9529 if (!tnapi->tx_ring)
9532 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9533 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9534 NIC_SRAM_TX_BUFFER_DESC);
9538 /* tp->lock is held. */
9539 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9543 /* Disable all receive return rings but the first. */
9544 if (tg3_flag(tp, 5717_PLUS))
9545 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9546 else if (!tg3_flag(tp, 5705_PLUS))
9547 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9548 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9549 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9550 tg3_flag(tp, 57765_CLASS))
9551 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9553 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9555 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9556 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9557 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9558 BDINFO_FLAGS_DISABLED);
9561 /* tp->lock is held. */
9562 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9565 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9567 if (tg3_flag(tp, ENABLE_RSS))
9570 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9571 struct tg3_napi *tnapi = &tp->napi[i];
9576 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9577 (tp->rx_ret_ring_mask + 1) <<
9578 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9582 /* tp->lock is held. */
9583 static void tg3_rings_reset(struct tg3 *tp)
9587 struct tg3_napi *tnapi = &tp->napi[0];
9589 tg3_tx_rcbs_disable(tp);
9591 tg3_rx_ret_rcbs_disable(tp);
9593 /* Disable interrupts */
9594 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9595 tp->napi[0].chk_msi_cnt = 0;
9596 tp->napi[0].last_rx_cons = 0;
9597 tp->napi[0].last_tx_cons = 0;
9599 /* Zero mailbox registers. */
9600 if (tg3_flag(tp, SUPPORT_MSIX)) {
9601 for (i = 1; i < tp->irq_max; i++) {
9602 tp->napi[i].tx_prod = 0;
9603 tp->napi[i].tx_cons = 0;
9604 if (tg3_flag(tp, ENABLE_TSS))
9605 tw32_mailbox(tp->napi[i].prodmbox, 0);
9606 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9607 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9608 tp->napi[i].chk_msi_cnt = 0;
9609 tp->napi[i].last_rx_cons = 0;
9610 tp->napi[i].last_tx_cons = 0;
9612 if (!tg3_flag(tp, ENABLE_TSS))
9613 tw32_mailbox(tp->napi[0].prodmbox, 0);
9615 tp->napi[0].tx_prod = 0;
9616 tp->napi[0].tx_cons = 0;
9617 tw32_mailbox(tp->napi[0].prodmbox, 0);
9618 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9621 /* Make sure the NIC-based send BD rings are disabled. */
9622 if (!tg3_flag(tp, 5705_PLUS)) {
9623 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9624 for (i = 0; i < 16; i++)
9625 tw32_tx_mbox(mbox + i * 8, 0);
9628 /* Clear status block in ram. */
9629 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9631 /* Set status block DMA address */
9632 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9633 ((u64) tnapi->status_mapping >> 32));
9634 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9635 ((u64) tnapi->status_mapping & 0xffffffff));
9637 stblk = HOSTCC_STATBLCK_RING1;
9639 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9640 u64 mapping = (u64)tnapi->status_mapping;
9641 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9642 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9645 /* Clear status block in ram. */
9646 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9649 tg3_tx_rcbs_init(tp);
9650 tg3_rx_ret_rcbs_init(tp);
9653 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9655 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9657 if (!tg3_flag(tp, 5750_PLUS) ||
9658 tg3_flag(tp, 5780_CLASS) ||
9659 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9660 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9661 tg3_flag(tp, 57765_PLUS))
9662 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9663 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9664 tg3_asic_rev(tp) == ASIC_REV_5787)
9665 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9667 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9669 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9670 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9672 val = min(nic_rep_thresh, host_rep_thresh);
9673 tw32(RCVBDI_STD_THRESH, val);
9675 if (tg3_flag(tp, 57765_PLUS))
9676 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9678 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9681 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9683 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9685 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9686 tw32(RCVBDI_JUMBO_THRESH, val);
9688 if (tg3_flag(tp, 57765_PLUS))
9689 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9692 static inline u32 calc_crc(unsigned char *buf, int len)
9700 for (j = 0; j < len; j++) {
9703 for (k = 0; k < 8; k++) {
9716 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9718 /* accept or reject all multicast frames */
9719 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9720 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9721 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9722 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9725 static void __tg3_set_rx_mode(struct net_device *dev)
9727 struct tg3 *tp = netdev_priv(dev);
9730 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9731 RX_MODE_KEEP_VLAN_TAG);
9733 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9734 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9737 if (!tg3_flag(tp, ENABLE_ASF))
9738 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9741 if (dev->flags & IFF_PROMISC) {
9742 /* Promiscuous mode. */
9743 rx_mode |= RX_MODE_PROMISC;
9744 } else if (dev->flags & IFF_ALLMULTI) {
9745 /* Accept all multicast. */
9746 tg3_set_multi(tp, 1);
9747 } else if (netdev_mc_empty(dev)) {
9748 /* Reject all multicast. */
9749 tg3_set_multi(tp, 0);
9751 /* Accept one or more multicast(s). */
9752 struct netdev_hw_addr *ha;
9753 u32 mc_filter[4] = { 0, };
9758 netdev_for_each_mc_addr(ha, dev) {
9759 crc = calc_crc(ha->addr, ETH_ALEN);
9761 regidx = (bit & 0x60) >> 5;
9763 mc_filter[regidx] |= (1 << bit);
9766 tw32(MAC_HASH_REG_0, mc_filter[0]);
9767 tw32(MAC_HASH_REG_1, mc_filter[1]);
9768 tw32(MAC_HASH_REG_2, mc_filter[2]);
9769 tw32(MAC_HASH_REG_3, mc_filter[3]);
9772 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9773 rx_mode |= RX_MODE_PROMISC;
9774 } else if (!(dev->flags & IFF_PROMISC)) {
9775 /* Add all entries into to the mac addr filter list */
9777 struct netdev_hw_addr *ha;
9779 netdev_for_each_uc_addr(ha, dev) {
9780 __tg3_set_one_mac_addr(tp, ha->addr,
9781 i + TG3_UCAST_ADDR_IDX(tp));
9786 if (rx_mode != tp->rx_mode) {
9787 tp->rx_mode = rx_mode;
9788 tw32_f(MAC_RX_MODE, rx_mode);
9793 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9797 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9798 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9801 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9805 if (!tg3_flag(tp, SUPPORT_MSIX))
9808 if (tp->rxq_cnt == 1) {
9809 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9813 /* Validate table against current IRQ count */
9814 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9815 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9819 if (i != TG3_RSS_INDIR_TBL_SIZE)
9820 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9823 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9826 u32 reg = MAC_RSS_INDIR_TBL_0;
9828 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9829 u32 val = tp->rss_ind_tbl[i];
9831 for (; i % 8; i++) {
9833 val |= tp->rss_ind_tbl[i];
9840 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9842 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9843 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9845 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9848 /* tp->lock is held. */
9849 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9851 u32 val, rdmac_mode;
9853 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9855 tg3_disable_ints(tp);
9859 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9861 if (tg3_flag(tp, INIT_COMPLETE))
9862 tg3_abort_hw(tp, 1);
9864 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9865 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9866 tg3_phy_pull_config(tp);
9867 tg3_eee_pull_config(tp, NULL);
9868 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9871 /* Enable MAC control of LPI */
9872 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9878 err = tg3_chip_reset(tp);
9882 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9884 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9885 val = tr32(TG3_CPMU_CTRL);
9886 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9887 tw32(TG3_CPMU_CTRL, val);
9889 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9890 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9891 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9892 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9894 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9895 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9896 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9897 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9899 val = tr32(TG3_CPMU_HST_ACC);
9900 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9901 val |= CPMU_HST_ACC_MACCLK_6_25;
9902 tw32(TG3_CPMU_HST_ACC, val);
9905 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9906 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9907 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9908 PCIE_PWR_MGMT_L1_THRESH_4MS;
9909 tw32(PCIE_PWR_MGMT_THRESH, val);
9911 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9912 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9914 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9916 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9917 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9920 if (tg3_flag(tp, L1PLLPD_EN)) {
9921 u32 grc_mode = tr32(GRC_MODE);
9923 /* Access the lower 1K of PL PCIE block registers. */
9924 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9925 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9927 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9928 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9929 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9931 tw32(GRC_MODE, grc_mode);
9934 if (tg3_flag(tp, 57765_CLASS)) {
9935 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9936 u32 grc_mode = tr32(GRC_MODE);
9938 /* Access the lower 1K of PL PCIE block registers. */
9939 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9940 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9942 val = tr32(TG3_PCIE_TLDLPL_PORT +
9943 TG3_PCIE_PL_LO_PHYCTL5);
9944 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9945 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9947 tw32(GRC_MODE, grc_mode);
9950 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9953 /* Fix transmit hangs */
9954 val = tr32(TG3_CPMU_PADRNG_CTL);
9955 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9956 tw32(TG3_CPMU_PADRNG_CTL, val);
9958 grc_mode = tr32(GRC_MODE);
9960 /* Access the lower 1K of DL PCIE block registers. */
9961 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9962 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9964 val = tr32(TG3_PCIE_TLDLPL_PORT +
9965 TG3_PCIE_DL_LO_FTSMAX);
9966 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9967 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9968 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9970 tw32(GRC_MODE, grc_mode);
9973 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9974 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9975 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9976 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9979 /* This works around an issue with Athlon chipsets on
9980 * B3 tigon3 silicon. This bit has no effect on any
9981 * other revision. But do not set this on PCI Express
9982 * chips and don't even touch the clocks if the CPMU is present.
9984 if (!tg3_flag(tp, CPMU_PRESENT)) {
9985 if (!tg3_flag(tp, PCI_EXPRESS))
9986 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9987 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9990 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9991 tg3_flag(tp, PCIX_MODE)) {
9992 val = tr32(TG3PCI_PCISTATE);
9993 val |= PCISTATE_RETRY_SAME_DMA;
9994 tw32(TG3PCI_PCISTATE, val);
9997 if (tg3_flag(tp, ENABLE_APE)) {
9998 /* Allow reads and writes to the
9999 * APE register and memory space.
10001 val = tr32(TG3PCI_PCISTATE);
10002 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10003 PCISTATE_ALLOW_APE_SHMEM_WR |
10004 PCISTATE_ALLOW_APE_PSPACE_WR;
10005 tw32(TG3PCI_PCISTATE, val);
10008 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10009 /* Enable some hw fixes. */
10010 val = tr32(TG3PCI_MSI_DATA);
10011 val |= (1 << 26) | (1 << 28) | (1 << 29);
10012 tw32(TG3PCI_MSI_DATA, val);
10015 /* Descriptor ring init may make accesses to the
10016 * NIC SRAM area to setup the TX descriptors, so we
10017 * can only do this after the hardware has been
10018 * successfully reset.
10020 err = tg3_init_rings(tp);
10024 if (tg3_flag(tp, 57765_PLUS)) {
10025 val = tr32(TG3PCI_DMA_RW_CTRL) &
10026 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10027 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10028 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10029 if (!tg3_flag(tp, 57765_CLASS) &&
10030 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10031 tg3_asic_rev(tp) != ASIC_REV_5762)
10032 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10033 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10034 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10035 tg3_asic_rev(tp) != ASIC_REV_5761) {
10036 /* This value is determined during the probe time DMA
10037 * engine test, tg3_test_dma.
10039 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10042 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10043 GRC_MODE_4X_NIC_SEND_RINGS |
10044 GRC_MODE_NO_TX_PHDR_CSUM |
10045 GRC_MODE_NO_RX_PHDR_CSUM);
10046 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10048 /* Pseudo-header checksum is done by hardware logic and not
10049 * the offload processers, so make the chip do the pseudo-
10050 * header checksums on receive. For transmit it is more
10051 * convenient to do the pseudo-header checksum in software
10052 * as Linux does that on transmit for us in all cases.
10054 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10056 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10058 tw32(TG3_RX_PTP_CTL,
10059 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10061 if (tg3_flag(tp, PTP_CAPABLE))
10062 val |= GRC_MODE_TIME_SYNC_ENABLE;
10064 tw32(GRC_MODE, tp->grc_mode | val);
10066 /* On one of the AMD platform, MRRS is restricted to 4000 because of
10067 * south bridge limitation. As a workaround, Driver is setting MRRS
10068 * to 2048 instead of default 4096.
10070 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10071 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10072 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10073 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10076 /* Setup the timer prescalar register. Clock is always 66Mhz. */
10077 val = tr32(GRC_MISC_CFG);
10079 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10080 tw32(GRC_MISC_CFG, val);
10082 /* Initialize MBUF/DESC pool. */
10083 if (tg3_flag(tp, 5750_PLUS)) {
10085 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10086 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10087 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10088 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10090 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10091 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10092 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10093 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10096 fw_len = tp->fw_len;
10097 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10098 tw32(BUFMGR_MB_POOL_ADDR,
10099 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10100 tw32(BUFMGR_MB_POOL_SIZE,
10101 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10104 if (tp->dev->mtu <= ETH_DATA_LEN) {
10105 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10106 tp->bufmgr_config.mbuf_read_dma_low_water);
10107 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10108 tp->bufmgr_config.mbuf_mac_rx_low_water);
10109 tw32(BUFMGR_MB_HIGH_WATER,
10110 tp->bufmgr_config.mbuf_high_water);
10112 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10113 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10114 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10115 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10116 tw32(BUFMGR_MB_HIGH_WATER,
10117 tp->bufmgr_config.mbuf_high_water_jumbo);
10119 tw32(BUFMGR_DMA_LOW_WATER,
10120 tp->bufmgr_config.dma_low_water);
10121 tw32(BUFMGR_DMA_HIGH_WATER,
10122 tp->bufmgr_config.dma_high_water);
10124 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10125 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10126 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10127 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10128 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10129 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10130 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10131 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10132 tw32(BUFMGR_MODE, val);
10133 for (i = 0; i < 2000; i++) {
10134 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10139 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10143 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10144 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10146 tg3_setup_rxbd_thresholds(tp);
10148 /* Initialize TG3_BDINFO's at:
10149 * RCVDBDI_STD_BD: standard eth size rx ring
10150 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10151 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10154 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10155 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10156 * ring attribute flags
10157 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10159 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10160 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10162 * The size of each ring is fixed in the firmware, but the location is
10165 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10166 ((u64) tpr->rx_std_mapping >> 32));
10167 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10168 ((u64) tpr->rx_std_mapping & 0xffffffff));
10169 if (!tg3_flag(tp, 5717_PLUS))
10170 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10171 NIC_SRAM_RX_BUFFER_DESC);
10173 /* Disable the mini ring */
10174 if (!tg3_flag(tp, 5705_PLUS))
10175 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10176 BDINFO_FLAGS_DISABLED);
10178 /* Program the jumbo buffer descriptor ring control
10179 * blocks on those devices that have them.
10181 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10182 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10184 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10185 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10186 ((u64) tpr->rx_jmb_mapping >> 32));
10187 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10188 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10189 val = TG3_RX_JMB_RING_SIZE(tp) <<
10190 BDINFO_FLAGS_MAXLEN_SHIFT;
10191 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10192 val | BDINFO_FLAGS_USE_EXT_RECV);
10193 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10194 tg3_flag(tp, 57765_CLASS) ||
10195 tg3_asic_rev(tp) == ASIC_REV_5762)
10196 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10197 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10199 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10200 BDINFO_FLAGS_DISABLED);
10203 if (tg3_flag(tp, 57765_PLUS)) {
10204 val = TG3_RX_STD_RING_SIZE(tp);
10205 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10206 val |= (TG3_RX_STD_DMA_SZ << 2);
10208 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10210 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10212 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10214 tpr->rx_std_prod_idx = tp->rx_pending;
10215 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10217 tpr->rx_jmb_prod_idx =
10218 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10219 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10221 tg3_rings_reset(tp);
10223 /* Initialize MAC address and backoff seed. */
10224 __tg3_set_mac_addr(tp, false);
10226 /* MTU + ethernet header + FCS + optional VLAN tag */
10227 tw32(MAC_RX_MTU_SIZE,
10228 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10230 /* The slot time is changed by tg3_setup_phy if we
10231 * run at gigabit with half duplex.
10233 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10234 (6 << TX_LENGTHS_IPG_SHIFT) |
10235 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10237 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10238 tg3_asic_rev(tp) == ASIC_REV_5762)
10239 val |= tr32(MAC_TX_LENGTHS) &
10240 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10241 TX_LENGTHS_CNT_DWN_VAL_MSK);
10243 tw32(MAC_TX_LENGTHS, val);
10245 /* Receive rules. */
10246 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10247 tw32(RCVLPC_CONFIG, 0x0181);
10249 /* Calculate RDMAC_MODE setting early, we need it to determine
10250 * the RCVLPC_STATE_ENABLE mask.
10252 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10253 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10254 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10255 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10256 RDMAC_MODE_LNGREAD_ENAB);
10258 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10259 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10261 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10262 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10263 tg3_asic_rev(tp) == ASIC_REV_57780)
10264 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10265 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10266 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10268 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10269 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10270 if (tg3_flag(tp, TSO_CAPABLE) &&
10271 tg3_asic_rev(tp) == ASIC_REV_5705) {
10272 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10273 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10274 !tg3_flag(tp, IS_5788)) {
10275 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10279 if (tg3_flag(tp, PCI_EXPRESS))
10280 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10282 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10284 if (tp->dev->mtu <= ETH_DATA_LEN) {
10285 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10286 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10290 if (tg3_flag(tp, HW_TSO_1) ||
10291 tg3_flag(tp, HW_TSO_2) ||
10292 tg3_flag(tp, HW_TSO_3))
10293 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10295 if (tg3_flag(tp, 57765_PLUS) ||
10296 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10297 tg3_asic_rev(tp) == ASIC_REV_57780)
10298 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10300 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10301 tg3_asic_rev(tp) == ASIC_REV_5762)
10302 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10304 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10305 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10306 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10307 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10308 tg3_flag(tp, 57765_PLUS)) {
10311 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10312 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10314 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10316 val = tr32(tgtreg);
10317 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10318 tg3_asic_rev(tp) == ASIC_REV_5762) {
10319 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10320 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10321 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10322 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10323 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10324 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10326 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10329 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10330 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10331 tg3_asic_rev(tp) == ASIC_REV_5762) {
10334 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10335 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10337 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10339 val = tr32(tgtreg);
10341 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10342 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10345 /* Receive/send statistics. */
10346 if (tg3_flag(tp, 5750_PLUS)) {
10347 val = tr32(RCVLPC_STATS_ENABLE);
10348 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10349 tw32(RCVLPC_STATS_ENABLE, val);
10350 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10351 tg3_flag(tp, TSO_CAPABLE)) {
10352 val = tr32(RCVLPC_STATS_ENABLE);
10353 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10354 tw32(RCVLPC_STATS_ENABLE, val);
10356 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10358 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10359 tw32(SNDDATAI_STATSENAB, 0xffffff);
10360 tw32(SNDDATAI_STATSCTRL,
10361 (SNDDATAI_SCTRL_ENABLE |
10362 SNDDATAI_SCTRL_FASTUPD));
10364 /* Setup host coalescing engine. */
10365 tw32(HOSTCC_MODE, 0);
10366 for (i = 0; i < 2000; i++) {
10367 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10372 __tg3_set_coalesce(tp, &tp->coal);
10374 if (!tg3_flag(tp, 5705_PLUS)) {
10375 /* Status/statistics block address. See tg3_timer,
10376 * the tg3_periodic_fetch_stats call there, and
10377 * tg3_get_stats to see how this works for 5705/5750 chips.
10379 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10380 ((u64) tp->stats_mapping >> 32));
10381 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10382 ((u64) tp->stats_mapping & 0xffffffff));
10383 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10385 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10387 /* Clear statistics and status block memory areas */
10388 for (i = NIC_SRAM_STATS_BLK;
10389 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10390 i += sizeof(u32)) {
10391 tg3_write_mem(tp, i, 0);
10396 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10398 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10399 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10400 if (!tg3_flag(tp, 5705_PLUS))
10401 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10403 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10404 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10405 /* reset to prevent losing 1st rx packet intermittently */
10406 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10410 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10411 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10412 MAC_MODE_FHDE_ENABLE;
10413 if (tg3_flag(tp, ENABLE_APE))
10414 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10415 if (!tg3_flag(tp, 5705_PLUS) &&
10416 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10417 tg3_asic_rev(tp) != ASIC_REV_5700)
10418 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10419 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10422 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10423 * If TG3_FLAG_IS_NIC is zero, we should read the
10424 * register to preserve the GPIO settings for LOMs. The GPIOs,
10425 * whether used as inputs or outputs, are set by boot code after
10428 if (!tg3_flag(tp, IS_NIC)) {
10431 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10432 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10433 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10435 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10436 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10437 GRC_LCLCTRL_GPIO_OUTPUT3;
10439 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10440 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10442 tp->grc_local_ctrl &= ~gpio_mask;
10443 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10445 /* GPIO1 must be driven high for eeprom write protect */
10446 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10447 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10448 GRC_LCLCTRL_GPIO_OUTPUT1);
10450 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10453 if (tg3_flag(tp, USING_MSIX)) {
10454 val = tr32(MSGINT_MODE);
10455 val |= MSGINT_MODE_ENABLE;
10456 if (tp->irq_cnt > 1)
10457 val |= MSGINT_MODE_MULTIVEC_EN;
10458 if (!tg3_flag(tp, 1SHOT_MSI))
10459 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10460 tw32(MSGINT_MODE, val);
10463 if (!tg3_flag(tp, 5705_PLUS)) {
10464 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10468 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10469 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10470 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10471 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10472 WDMAC_MODE_LNGREAD_ENAB);
10474 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10475 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10476 if (tg3_flag(tp, TSO_CAPABLE) &&
10477 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10478 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10480 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10481 !tg3_flag(tp, IS_5788)) {
10482 val |= WDMAC_MODE_RX_ACCEL;
10486 /* Enable host coalescing bug fix */
10487 if (tg3_flag(tp, 5755_PLUS))
10488 val |= WDMAC_MODE_STATUS_TAG_FIX;
10490 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10491 val |= WDMAC_MODE_BURST_ALL_DATA;
10493 tw32_f(WDMAC_MODE, val);
10496 if (tg3_flag(tp, PCIX_MODE)) {
10499 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10501 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10502 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10503 pcix_cmd |= PCI_X_CMD_READ_2K;
10504 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10505 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10506 pcix_cmd |= PCI_X_CMD_READ_2K;
10508 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10512 tw32_f(RDMAC_MODE, rdmac_mode);
10515 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10516 tg3_asic_rev(tp) == ASIC_REV_5720) {
10517 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10518 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10521 if (i < TG3_NUM_RDMA_CHANNELS) {
10522 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10523 val |= tg3_lso_rd_dma_workaround_bit(tp);
10524 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10525 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10529 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10530 if (!tg3_flag(tp, 5705_PLUS))
10531 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10533 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10534 tw32(SNDDATAC_MODE,
10535 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10537 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10539 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10540 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10541 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10542 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10543 val |= RCVDBDI_MODE_LRG_RING_SZ;
10544 tw32(RCVDBDI_MODE, val);
10545 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10546 if (tg3_flag(tp, HW_TSO_1) ||
10547 tg3_flag(tp, HW_TSO_2) ||
10548 tg3_flag(tp, HW_TSO_3))
10549 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10550 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10551 if (tg3_flag(tp, ENABLE_TSS))
10552 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10553 tw32(SNDBDI_MODE, val);
10554 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10556 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10557 err = tg3_load_5701_a0_firmware_fix(tp);
10562 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10563 /* Ignore any errors for the firmware download. If download
10564 * fails, the device will operate with EEE disabled
10566 tg3_load_57766_firmware(tp);
10569 if (tg3_flag(tp, TSO_CAPABLE)) {
10570 err = tg3_load_tso_firmware(tp);
10575 tp->tx_mode = TX_MODE_ENABLE;
10577 if (tg3_flag(tp, 5755_PLUS) ||
10578 tg3_asic_rev(tp) == ASIC_REV_5906)
10579 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10581 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10582 tg3_asic_rev(tp) == ASIC_REV_5762) {
10583 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10584 tp->tx_mode &= ~val;
10585 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10588 tw32_f(MAC_TX_MODE, tp->tx_mode);
10591 if (tg3_flag(tp, ENABLE_RSS)) {
10594 tg3_rss_write_indir_tbl(tp);
10596 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10598 for (i = 0; i < 10 ; i++)
10599 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10602 tp->rx_mode = RX_MODE_ENABLE;
10603 if (tg3_flag(tp, 5755_PLUS))
10604 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10606 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10607 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10609 if (tg3_flag(tp, ENABLE_RSS))
10610 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10611 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10612 RX_MODE_RSS_IPV6_HASH_EN |
10613 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10614 RX_MODE_RSS_IPV4_HASH_EN |
10615 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10617 tw32_f(MAC_RX_MODE, tp->rx_mode);
10620 tw32(MAC_LED_CTRL, tp->led_ctrl);
10622 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10623 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10624 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10627 tw32_f(MAC_RX_MODE, tp->rx_mode);
10630 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10631 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10632 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10633 /* Set drive transmission level to 1.2V */
10634 /* only if the signal pre-emphasis bit is not set */
10635 val = tr32(MAC_SERDES_CFG);
10638 tw32(MAC_SERDES_CFG, val);
10640 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10641 tw32(MAC_SERDES_CFG, 0x616000);
10644 /* Prevent chip from dropping frames when flow control
10647 if (tg3_flag(tp, 57765_CLASS))
10651 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10653 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10654 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10655 /* Use hardware link auto-negotiation */
10656 tg3_flag_set(tp, HW_AUTONEG);
10659 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10660 tg3_asic_rev(tp) == ASIC_REV_5714) {
10663 tmp = tr32(SERDES_RX_CTRL);
10664 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10665 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10666 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10667 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10670 if (!tg3_flag(tp, USE_PHYLIB)) {
10671 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10672 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10674 err = tg3_setup_phy(tp, false);
10678 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10679 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10682 /* Clear CRC stats. */
10683 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10684 tg3_writephy(tp, MII_TG3_TEST1,
10685 tmp | MII_TG3_TEST1_CRC_EN);
10686 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10691 __tg3_set_rx_mode(tp->dev);
10693 /* Initialize receive rules. */
10694 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10695 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10696 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10697 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10699 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10703 if (tg3_flag(tp, ENABLE_ASF))
10707 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10709 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10711 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10713 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10715 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10717 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10719 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10721 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10723 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10725 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10727 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10729 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10731 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10733 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10741 if (tg3_flag(tp, ENABLE_APE))
10742 /* Write our heartbeat update interval to APE. */
10743 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10744 APE_HOST_HEARTBEAT_INT_5SEC);
10746 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10751 /* Called at device open time to get the chip ready for
10752 * packet processing. Invoked with tp->lock held.
10754 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10756 /* Chip may have been just powered on. If so, the boot code may still
10757 * be running initialization. Wait for it to finish to avoid races in
10758 * accessing the hardware.
10760 tg3_enable_register_access(tp);
10763 tg3_switch_clocks(tp);
10765 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10767 return tg3_reset_hw(tp, reset_phy);
10770 #ifdef CONFIG_TIGON3_HWMON
10771 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10775 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10776 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10778 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10781 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10782 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10783 memset(ocir, 0, TG3_OCIR_LEN);
10787 /* sysfs attributes for hwmon */
10788 static ssize_t tg3_show_temp(struct device *dev,
10789 struct device_attribute *devattr, char *buf)
10791 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10792 struct tg3 *tp = dev_get_drvdata(dev);
10795 spin_lock_bh(&tp->lock);
10796 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10797 sizeof(temperature));
10798 spin_unlock_bh(&tp->lock);
10799 return sprintf(buf, "%u\n", temperature * 1000);
10803 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10804 TG3_TEMP_SENSOR_OFFSET);
10805 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10806 TG3_TEMP_CAUTION_OFFSET);
10807 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10808 TG3_TEMP_MAX_OFFSET);
10810 static struct attribute *tg3_attrs[] = {
10811 &sensor_dev_attr_temp1_input.dev_attr.attr,
10812 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10813 &sensor_dev_attr_temp1_max.dev_attr.attr,
10816 ATTRIBUTE_GROUPS(tg3);
10818 static void tg3_hwmon_close(struct tg3 *tp)
10820 if (tp->hwmon_dev) {
10821 hwmon_device_unregister(tp->hwmon_dev);
10822 tp->hwmon_dev = NULL;
10826 static void tg3_hwmon_open(struct tg3 *tp)
10830 struct pci_dev *pdev = tp->pdev;
10831 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10833 tg3_sd_scan_scratchpad(tp, ocirs);
10835 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10836 if (!ocirs[i].src_data_length)
10839 size += ocirs[i].src_hdr_length;
10840 size += ocirs[i].src_data_length;
10846 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10848 if (IS_ERR(tp->hwmon_dev)) {
10849 tp->hwmon_dev = NULL;
10850 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10854 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10855 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10856 #endif /* CONFIG_TIGON3_HWMON */
10859 #define TG3_STAT_ADD32(PSTAT, REG) \
10860 do { u32 __val = tr32(REG); \
10861 (PSTAT)->low += __val; \
10862 if ((PSTAT)->low < __val) \
10863 (PSTAT)->high += 1; \
10866 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10868 struct tg3_hw_stats *sp = tp->hw_stats;
10873 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10874 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10875 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10876 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10877 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10878 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10879 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10880 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10881 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10882 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10883 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10884 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10885 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10886 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10887 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10888 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10891 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10892 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10893 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10894 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10897 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10898 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10899 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10900 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10901 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10902 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10903 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10904 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10905 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10906 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10907 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10908 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10909 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10910 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10912 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10913 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10914 tg3_asic_rev(tp) != ASIC_REV_5762 &&
10915 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10916 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10917 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10919 u32 val = tr32(HOSTCC_FLOW_ATTN);
10920 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10922 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10923 sp->rx_discards.low += val;
10924 if (sp->rx_discards.low < val)
10925 sp->rx_discards.high += 1;
10927 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10929 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10932 static void tg3_chk_missed_msi(struct tg3 *tp)
10936 for (i = 0; i < tp->irq_cnt; i++) {
10937 struct tg3_napi *tnapi = &tp->napi[i];
10939 if (tg3_has_work(tnapi)) {
10940 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10941 tnapi->last_tx_cons == tnapi->tx_cons) {
10942 if (tnapi->chk_msi_cnt < 1) {
10943 tnapi->chk_msi_cnt++;
10949 tnapi->chk_msi_cnt = 0;
10950 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10951 tnapi->last_tx_cons = tnapi->tx_cons;
10955 static void tg3_timer(struct timer_list *t)
10957 struct tg3 *tp = from_timer(tp, t, timer);
10959 spin_lock(&tp->lock);
10961 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10962 spin_unlock(&tp->lock);
10963 goto restart_timer;
10966 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10967 tg3_flag(tp, 57765_CLASS))
10968 tg3_chk_missed_msi(tp);
10970 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10971 /* BCM4785: Flush posted writes from GbE to host memory. */
10975 if (!tg3_flag(tp, TAGGED_STATUS)) {
10976 /* All of this garbage is because when using non-tagged
10977 * IRQ status the mailbox/status_block protocol the chip
10978 * uses with the cpu is race prone.
10980 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10981 tw32(GRC_LOCAL_CTRL,
10982 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10984 tw32(HOSTCC_MODE, tp->coalesce_mode |
10985 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10988 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10989 spin_unlock(&tp->lock);
10990 tg3_reset_task_schedule(tp);
10991 goto restart_timer;
10995 /* This part only runs once per second. */
10996 if (!--tp->timer_counter) {
10997 if (tg3_flag(tp, 5705_PLUS))
10998 tg3_periodic_fetch_stats(tp);
11000 if (tp->setlpicnt && !--tp->setlpicnt)
11001 tg3_phy_eee_enable(tp);
11003 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11007 mac_stat = tr32(MAC_STATUS);
11010 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11011 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11013 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11017 tg3_setup_phy(tp, false);
11018 } else if (tg3_flag(tp, POLL_SERDES)) {
11019 u32 mac_stat = tr32(MAC_STATUS);
11020 int need_setup = 0;
11023 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11026 if (!tp->link_up &&
11027 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11028 MAC_STATUS_SIGNAL_DET))) {
11032 if (!tp->serdes_counter) {
11035 ~MAC_MODE_PORT_MODE_MASK));
11037 tw32_f(MAC_MODE, tp->mac_mode);
11040 tg3_setup_phy(tp, false);
11042 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11043 tg3_flag(tp, 5780_CLASS)) {
11044 tg3_serdes_parallel_detect(tp);
11045 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11046 u32 cpmu = tr32(TG3_CPMU_STATUS);
11047 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11048 TG3_CPMU_STATUS_LINK_MASK);
11050 if (link_up != tp->link_up)
11051 tg3_setup_phy(tp, false);
11054 tp->timer_counter = tp->timer_multiplier;
11057 /* Heartbeat is only sent once every 2 seconds.
11059 * The heartbeat is to tell the ASF firmware that the host
11060 * driver is still alive. In the event that the OS crashes,
11061 * ASF needs to reset the hardware to free up the FIFO space
11062 * that may be filled with rx packets destined for the host.
11063 * If the FIFO is full, ASF will no longer function properly.
11065 * Unintended resets have been reported on real time kernels
11066 * where the timer doesn't run on time. Netpoll will also have
11069 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11070 * to check the ring condition when the heartbeat is expiring
11071 * before doing the reset. This will prevent most unintended
11074 if (!--tp->asf_counter) {
11075 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11076 tg3_wait_for_event_ack(tp);
11078 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11079 FWCMD_NICDRV_ALIVE3);
11080 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11081 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11082 TG3_FW_UPDATE_TIMEOUT_SEC);
11084 tg3_generate_fw_event(tp);
11086 tp->asf_counter = tp->asf_multiplier;
11089 /* Update the APE heartbeat every 5 seconds.*/
11090 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11092 spin_unlock(&tp->lock);
11095 tp->timer.expires = jiffies + tp->timer_offset;
11096 add_timer(&tp->timer);
11099 static void tg3_timer_init(struct tg3 *tp)
11101 if (tg3_flag(tp, TAGGED_STATUS) &&
11102 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11103 !tg3_flag(tp, 57765_CLASS))
11104 tp->timer_offset = HZ;
11106 tp->timer_offset = HZ / 10;
11108 BUG_ON(tp->timer_offset > HZ);
11110 tp->timer_multiplier = (HZ / tp->timer_offset);
11111 tp->asf_multiplier = (HZ / tp->timer_offset) *
11112 TG3_FW_UPDATE_FREQ_SEC;
11114 timer_setup(&tp->timer, tg3_timer, 0);
11117 static void tg3_timer_start(struct tg3 *tp)
11119 tp->asf_counter = tp->asf_multiplier;
11120 tp->timer_counter = tp->timer_multiplier;
11122 tp->timer.expires = jiffies + tp->timer_offset;
11123 add_timer(&tp->timer);
11126 static void tg3_timer_stop(struct tg3 *tp)
11128 del_timer_sync(&tp->timer);
11131 /* Restart hardware after configuration changes, self-test, etc.
11132 * Invoked with tp->lock held.
11134 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11135 __releases(tp->lock)
11136 __acquires(tp->lock)
11140 err = tg3_init_hw(tp, reset_phy);
11142 netdev_err(tp->dev,
11143 "Failed to re-initialize device, aborting\n");
11144 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11145 tg3_full_unlock(tp);
11146 tg3_timer_stop(tp);
11148 tg3_napi_enable(tp);
11149 dev_close(tp->dev);
11150 tg3_full_lock(tp, 0);
11155 static void tg3_reset_task(struct work_struct *work)
11157 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11161 tg3_full_lock(tp, 0);
11163 if (!netif_running(tp->dev)) {
11164 tg3_flag_clear(tp, RESET_TASK_PENDING);
11165 tg3_full_unlock(tp);
11170 tg3_full_unlock(tp);
11174 tg3_netif_stop(tp);
11176 tg3_full_lock(tp, 1);
11178 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11179 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11180 tp->write32_rx_mbox = tg3_write_flush_reg32;
11181 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11182 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11185 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11186 err = tg3_init_hw(tp, true);
11190 tg3_netif_start(tp);
11193 tg3_full_unlock(tp);
11198 tg3_flag_clear(tp, RESET_TASK_PENDING);
11202 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11205 unsigned long flags;
11207 struct tg3_napi *tnapi = &tp->napi[irq_num];
11209 if (tp->irq_cnt == 1)
11210 name = tp->dev->name;
11212 name = &tnapi->irq_lbl[0];
11213 if (tnapi->tx_buffers && tnapi->rx_rcb)
11214 snprintf(name, IFNAMSIZ,
11215 "%s-txrx-%d", tp->dev->name, irq_num);
11216 else if (tnapi->tx_buffers)
11217 snprintf(name, IFNAMSIZ,
11218 "%s-tx-%d", tp->dev->name, irq_num);
11219 else if (tnapi->rx_rcb)
11220 snprintf(name, IFNAMSIZ,
11221 "%s-rx-%d", tp->dev->name, irq_num);
11223 snprintf(name, IFNAMSIZ,
11224 "%s-%d", tp->dev->name, irq_num);
11225 name[IFNAMSIZ-1] = 0;
11228 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11230 if (tg3_flag(tp, 1SHOT_MSI))
11231 fn = tg3_msi_1shot;
11234 fn = tg3_interrupt;
11235 if (tg3_flag(tp, TAGGED_STATUS))
11236 fn = tg3_interrupt_tagged;
11237 flags = IRQF_SHARED;
11240 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11243 static int tg3_test_interrupt(struct tg3 *tp)
11245 struct tg3_napi *tnapi = &tp->napi[0];
11246 struct net_device *dev = tp->dev;
11247 int err, i, intr_ok = 0;
11250 if (!netif_running(dev))
11253 tg3_disable_ints(tp);
11255 free_irq(tnapi->irq_vec, tnapi);
11258 * Turn off MSI one shot mode. Otherwise this test has no
11259 * observable way to know whether the interrupt was delivered.
11261 if (tg3_flag(tp, 57765_PLUS)) {
11262 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11263 tw32(MSGINT_MODE, val);
11266 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11267 IRQF_SHARED, dev->name, tnapi);
11271 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11272 tg3_enable_ints(tp);
11274 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11277 for (i = 0; i < 5; i++) {
11278 u32 int_mbox, misc_host_ctrl;
11280 int_mbox = tr32_mailbox(tnapi->int_mbox);
11281 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11283 if ((int_mbox != 0) ||
11284 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11289 if (tg3_flag(tp, 57765_PLUS) &&
11290 tnapi->hw_status->status_tag != tnapi->last_tag)
11291 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11296 tg3_disable_ints(tp);
11298 free_irq(tnapi->irq_vec, tnapi);
11300 err = tg3_request_irq(tp, 0);
11306 /* Reenable MSI one shot mode. */
11307 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11308 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11309 tw32(MSGINT_MODE, val);
11317 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11318 * successfully restored
11320 static int tg3_test_msi(struct tg3 *tp)
11325 if (!tg3_flag(tp, USING_MSI))
11328 /* Turn off SERR reporting in case MSI terminates with Master
11331 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11332 pci_write_config_word(tp->pdev, PCI_COMMAND,
11333 pci_cmd & ~PCI_COMMAND_SERR);
11335 err = tg3_test_interrupt(tp);
11337 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11342 /* other failures */
11346 /* MSI test failed, go back to INTx mode */
11347 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11348 "to INTx mode. Please report this failure to the PCI "
11349 "maintainer and include system chipset information\n");
11351 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11353 pci_disable_msi(tp->pdev);
11355 tg3_flag_clear(tp, USING_MSI);
11356 tp->napi[0].irq_vec = tp->pdev->irq;
11358 err = tg3_request_irq(tp, 0);
11362 /* Need to reset the chip because the MSI cycle may have terminated
11363 * with Master Abort.
11365 tg3_full_lock(tp, 1);
11367 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11368 err = tg3_init_hw(tp, true);
11370 tg3_full_unlock(tp);
11373 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11378 static int tg3_request_firmware(struct tg3 *tp)
11380 const struct tg3_firmware_hdr *fw_hdr;
11382 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11383 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11388 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11390 /* Firmware blob starts with version numbers, followed by
11391 * start address and _full_ length including BSS sections
11392 * (which must be longer than the actual data, of course
11395 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11396 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11397 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11398 tp->fw_len, tp->fw_needed);
11399 release_firmware(tp->fw);
11404 /* We no longer need firmware; we have it. */
11405 tp->fw_needed = NULL;
11409 static u32 tg3_irq_count(struct tg3 *tp)
11411 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11414 /* We want as many rx rings enabled as there are cpus.
11415 * In multiqueue MSI-X mode, the first MSI-X vector
11416 * only deals with link interrupts, etc, so we add
11417 * one to the number of vectors we are requesting.
11419 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11425 static bool tg3_enable_msix(struct tg3 *tp)
11428 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11430 tp->txq_cnt = tp->txq_req;
11431 tp->rxq_cnt = tp->rxq_req;
11433 tp->rxq_cnt = netif_get_num_default_rss_queues();
11434 if (tp->rxq_cnt > tp->rxq_max)
11435 tp->rxq_cnt = tp->rxq_max;
11437 /* Disable multiple TX rings by default. Simple round-robin hardware
11438 * scheduling of the TX rings can cause starvation of rings with
11439 * small packets when other rings have TSO or jumbo packets.
11444 tp->irq_cnt = tg3_irq_count(tp);
11446 for (i = 0; i < tp->irq_max; i++) {
11447 msix_ent[i].entry = i;
11448 msix_ent[i].vector = 0;
11451 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11454 } else if (rc < tp->irq_cnt) {
11455 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11458 tp->rxq_cnt = max(rc - 1, 1);
11460 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11463 for (i = 0; i < tp->irq_max; i++)
11464 tp->napi[i].irq_vec = msix_ent[i].vector;
11466 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11467 pci_disable_msix(tp->pdev);
11471 if (tp->irq_cnt == 1)
11474 tg3_flag_set(tp, ENABLE_RSS);
11476 if (tp->txq_cnt > 1)
11477 tg3_flag_set(tp, ENABLE_TSS);
11479 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11484 static void tg3_ints_init(struct tg3 *tp)
11486 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11487 !tg3_flag(tp, TAGGED_STATUS)) {
11488 /* All MSI supporting chips should support tagged
11489 * status. Assert that this is the case.
11491 netdev_warn(tp->dev,
11492 "MSI without TAGGED_STATUS? Not using MSI\n");
11496 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11497 tg3_flag_set(tp, USING_MSIX);
11498 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11499 tg3_flag_set(tp, USING_MSI);
11501 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11502 u32 msi_mode = tr32(MSGINT_MODE);
11503 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11504 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11505 if (!tg3_flag(tp, 1SHOT_MSI))
11506 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11507 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11510 if (!tg3_flag(tp, USING_MSIX)) {
11512 tp->napi[0].irq_vec = tp->pdev->irq;
11515 if (tp->irq_cnt == 1) {
11518 netif_set_real_num_tx_queues(tp->dev, 1);
11519 netif_set_real_num_rx_queues(tp->dev, 1);
11523 static void tg3_ints_fini(struct tg3 *tp)
11525 if (tg3_flag(tp, USING_MSIX))
11526 pci_disable_msix(tp->pdev);
11527 else if (tg3_flag(tp, USING_MSI))
11528 pci_disable_msi(tp->pdev);
11529 tg3_flag_clear(tp, USING_MSI);
11530 tg3_flag_clear(tp, USING_MSIX);
11531 tg3_flag_clear(tp, ENABLE_RSS);
11532 tg3_flag_clear(tp, ENABLE_TSS);
11535 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11538 struct net_device *dev = tp->dev;
11542 * Setup interrupts first so we know how
11543 * many NAPI resources to allocate
11547 tg3_rss_check_indir_tbl(tp);
11549 /* The placement of this call is tied
11550 * to the setup and use of Host TX descriptors.
11552 err = tg3_alloc_consistent(tp);
11554 goto out_ints_fini;
11558 tg3_napi_enable(tp);
11560 for (i = 0; i < tp->irq_cnt; i++) {
11561 err = tg3_request_irq(tp, i);
11563 for (i--; i >= 0; i--) {
11564 struct tg3_napi *tnapi = &tp->napi[i];
11566 free_irq(tnapi->irq_vec, tnapi);
11568 goto out_napi_fini;
11572 tg3_full_lock(tp, 0);
11575 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11577 err = tg3_init_hw(tp, reset_phy);
11579 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11580 tg3_free_rings(tp);
11583 tg3_full_unlock(tp);
11588 if (test_irq && tg3_flag(tp, USING_MSI)) {
11589 err = tg3_test_msi(tp);
11592 tg3_full_lock(tp, 0);
11593 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11594 tg3_free_rings(tp);
11595 tg3_full_unlock(tp);
11597 goto out_napi_fini;
11600 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11601 u32 val = tr32(PCIE_TRANSACTION_CFG);
11603 tw32(PCIE_TRANSACTION_CFG,
11604 val | PCIE_TRANS_CFG_1SHOT_MSI);
11610 tg3_hwmon_open(tp);
11612 tg3_full_lock(tp, 0);
11614 tg3_timer_start(tp);
11615 tg3_flag_set(tp, INIT_COMPLETE);
11616 tg3_enable_ints(tp);
11618 tg3_ptp_resume(tp);
11620 tg3_full_unlock(tp);
11622 netif_tx_start_all_queues(dev);
11625 * Reset loopback feature if it was turned on while the device was down
11626 * make sure that it's installed properly now.
11628 if (dev->features & NETIF_F_LOOPBACK)
11629 tg3_set_loopback(dev, dev->features);
11634 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11635 struct tg3_napi *tnapi = &tp->napi[i];
11636 free_irq(tnapi->irq_vec, tnapi);
11640 tg3_napi_disable(tp);
11642 tg3_free_consistent(tp);
11650 static void tg3_stop(struct tg3 *tp)
11654 tg3_reset_task_cancel(tp);
11655 tg3_netif_stop(tp);
11657 tg3_timer_stop(tp);
11659 tg3_hwmon_close(tp);
11663 tg3_full_lock(tp, 1);
11665 tg3_disable_ints(tp);
11667 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11668 tg3_free_rings(tp);
11669 tg3_flag_clear(tp, INIT_COMPLETE);
11671 tg3_full_unlock(tp);
11673 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11674 struct tg3_napi *tnapi = &tp->napi[i];
11675 free_irq(tnapi->irq_vec, tnapi);
11682 tg3_free_consistent(tp);
11685 static int tg3_open(struct net_device *dev)
11687 struct tg3 *tp = netdev_priv(dev);
11690 if (tp->pcierr_recovery) {
11691 netdev_err(dev, "Failed to open device. PCI error recovery "
11696 if (tp->fw_needed) {
11697 err = tg3_request_firmware(tp);
11698 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11700 netdev_warn(tp->dev, "EEE capability disabled\n");
11701 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11702 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11703 netdev_warn(tp->dev, "EEE capability restored\n");
11704 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11706 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11710 netdev_warn(tp->dev, "TSO capability disabled\n");
11711 tg3_flag_clear(tp, TSO_CAPABLE);
11712 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11713 netdev_notice(tp->dev, "TSO capability restored\n");
11714 tg3_flag_set(tp, TSO_CAPABLE);
11718 tg3_carrier_off(tp);
11720 err = tg3_power_up(tp);
11724 tg3_full_lock(tp, 0);
11726 tg3_disable_ints(tp);
11727 tg3_flag_clear(tp, INIT_COMPLETE);
11729 tg3_full_unlock(tp);
11731 err = tg3_start(tp,
11732 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11735 tg3_frob_aux_power(tp, false);
11736 pci_set_power_state(tp->pdev, PCI_D3hot);
11742 static int tg3_close(struct net_device *dev)
11744 struct tg3 *tp = netdev_priv(dev);
11746 if (tp->pcierr_recovery) {
11747 netdev_err(dev, "Failed to close device. PCI error recovery "
11754 if (pci_device_is_present(tp->pdev)) {
11755 tg3_power_down_prepare(tp);
11757 tg3_carrier_off(tp);
11762 static inline u64 get_stat64(tg3_stat64_t *val)
11764 return ((u64)val->high << 32) | ((u64)val->low);
11767 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11769 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11771 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11772 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11773 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11776 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11777 tg3_writephy(tp, MII_TG3_TEST1,
11778 val | MII_TG3_TEST1_CRC_EN);
11779 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11783 tp->phy_crc_errors += val;
11785 return tp->phy_crc_errors;
11788 return get_stat64(&hw_stats->rx_fcs_errors);
11791 #define ESTAT_ADD(member) \
11792 estats->member = old_estats->member + \
11793 get_stat64(&hw_stats->member)
11795 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11797 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11798 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11800 ESTAT_ADD(rx_octets);
11801 ESTAT_ADD(rx_fragments);
11802 ESTAT_ADD(rx_ucast_packets);
11803 ESTAT_ADD(rx_mcast_packets);
11804 ESTAT_ADD(rx_bcast_packets);
11805 ESTAT_ADD(rx_fcs_errors);
11806 ESTAT_ADD(rx_align_errors);
11807 ESTAT_ADD(rx_xon_pause_rcvd);
11808 ESTAT_ADD(rx_xoff_pause_rcvd);
11809 ESTAT_ADD(rx_mac_ctrl_rcvd);
11810 ESTAT_ADD(rx_xoff_entered);
11811 ESTAT_ADD(rx_frame_too_long_errors);
11812 ESTAT_ADD(rx_jabbers);
11813 ESTAT_ADD(rx_undersize_packets);
11814 ESTAT_ADD(rx_in_length_errors);
11815 ESTAT_ADD(rx_out_length_errors);
11816 ESTAT_ADD(rx_64_or_less_octet_packets);
11817 ESTAT_ADD(rx_65_to_127_octet_packets);
11818 ESTAT_ADD(rx_128_to_255_octet_packets);
11819 ESTAT_ADD(rx_256_to_511_octet_packets);
11820 ESTAT_ADD(rx_512_to_1023_octet_packets);
11821 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11822 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11823 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11824 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11825 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11827 ESTAT_ADD(tx_octets);
11828 ESTAT_ADD(tx_collisions);
11829 ESTAT_ADD(tx_xon_sent);
11830 ESTAT_ADD(tx_xoff_sent);
11831 ESTAT_ADD(tx_flow_control);
11832 ESTAT_ADD(tx_mac_errors);
11833 ESTAT_ADD(tx_single_collisions);
11834 ESTAT_ADD(tx_mult_collisions);
11835 ESTAT_ADD(tx_deferred);
11836 ESTAT_ADD(tx_excessive_collisions);
11837 ESTAT_ADD(tx_late_collisions);
11838 ESTAT_ADD(tx_collide_2times);
11839 ESTAT_ADD(tx_collide_3times);
11840 ESTAT_ADD(tx_collide_4times);
11841 ESTAT_ADD(tx_collide_5times);
11842 ESTAT_ADD(tx_collide_6times);
11843 ESTAT_ADD(tx_collide_7times);
11844 ESTAT_ADD(tx_collide_8times);
11845 ESTAT_ADD(tx_collide_9times);
11846 ESTAT_ADD(tx_collide_10times);
11847 ESTAT_ADD(tx_collide_11times);
11848 ESTAT_ADD(tx_collide_12times);
11849 ESTAT_ADD(tx_collide_13times);
11850 ESTAT_ADD(tx_collide_14times);
11851 ESTAT_ADD(tx_collide_15times);
11852 ESTAT_ADD(tx_ucast_packets);
11853 ESTAT_ADD(tx_mcast_packets);
11854 ESTAT_ADD(tx_bcast_packets);
11855 ESTAT_ADD(tx_carrier_sense_errors);
11856 ESTAT_ADD(tx_discards);
11857 ESTAT_ADD(tx_errors);
11859 ESTAT_ADD(dma_writeq_full);
11860 ESTAT_ADD(dma_write_prioq_full);
11861 ESTAT_ADD(rxbds_empty);
11862 ESTAT_ADD(rx_discards);
11863 ESTAT_ADD(rx_errors);
11864 ESTAT_ADD(rx_threshold_hit);
11866 ESTAT_ADD(dma_readq_full);
11867 ESTAT_ADD(dma_read_prioq_full);
11868 ESTAT_ADD(tx_comp_queue_full);
11870 ESTAT_ADD(ring_set_send_prod_index);
11871 ESTAT_ADD(ring_status_update);
11872 ESTAT_ADD(nic_irqs);
11873 ESTAT_ADD(nic_avoided_irqs);
11874 ESTAT_ADD(nic_tx_threshold_hit);
11876 ESTAT_ADD(mbuf_lwm_thresh_hit);
11879 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11881 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11882 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11884 stats->rx_packets = old_stats->rx_packets +
11885 get_stat64(&hw_stats->rx_ucast_packets) +
11886 get_stat64(&hw_stats->rx_mcast_packets) +
11887 get_stat64(&hw_stats->rx_bcast_packets);
11889 stats->tx_packets = old_stats->tx_packets +
11890 get_stat64(&hw_stats->tx_ucast_packets) +
11891 get_stat64(&hw_stats->tx_mcast_packets) +
11892 get_stat64(&hw_stats->tx_bcast_packets);
11894 stats->rx_bytes = old_stats->rx_bytes +
11895 get_stat64(&hw_stats->rx_octets);
11896 stats->tx_bytes = old_stats->tx_bytes +
11897 get_stat64(&hw_stats->tx_octets);
11899 stats->rx_errors = old_stats->rx_errors +
11900 get_stat64(&hw_stats->rx_errors);
11901 stats->tx_errors = old_stats->tx_errors +
11902 get_stat64(&hw_stats->tx_errors) +
11903 get_stat64(&hw_stats->tx_mac_errors) +
11904 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11905 get_stat64(&hw_stats->tx_discards);
11907 stats->multicast = old_stats->multicast +
11908 get_stat64(&hw_stats->rx_mcast_packets);
11909 stats->collisions = old_stats->collisions +
11910 get_stat64(&hw_stats->tx_collisions);
11912 stats->rx_length_errors = old_stats->rx_length_errors +
11913 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11914 get_stat64(&hw_stats->rx_undersize_packets);
11916 stats->rx_frame_errors = old_stats->rx_frame_errors +
11917 get_stat64(&hw_stats->rx_align_errors);
11918 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11919 get_stat64(&hw_stats->tx_discards);
11920 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11921 get_stat64(&hw_stats->tx_carrier_sense_errors);
11923 stats->rx_crc_errors = old_stats->rx_crc_errors +
11924 tg3_calc_crc_errors(tp);
11926 stats->rx_missed_errors = old_stats->rx_missed_errors +
11927 get_stat64(&hw_stats->rx_discards);
11929 stats->rx_dropped = tp->rx_dropped;
11930 stats->tx_dropped = tp->tx_dropped;
11933 static int tg3_get_regs_len(struct net_device *dev)
11935 return TG3_REG_BLK_SIZE;
11938 static void tg3_get_regs(struct net_device *dev,
11939 struct ethtool_regs *regs, void *_p)
11941 struct tg3 *tp = netdev_priv(dev);
11945 memset(_p, 0, TG3_REG_BLK_SIZE);
11947 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11950 tg3_full_lock(tp, 0);
11952 tg3_dump_legacy_regs(tp, (u32 *)_p);
11954 tg3_full_unlock(tp);
11957 static int tg3_get_eeprom_len(struct net_device *dev)
11959 struct tg3 *tp = netdev_priv(dev);
11961 return tp->nvram_size;
11964 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11966 struct tg3 *tp = netdev_priv(dev);
11967 int ret, cpmu_restore = 0;
11969 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11972 if (tg3_flag(tp, NO_NVRAM))
11975 offset = eeprom->offset;
11979 eeprom->magic = TG3_EEPROM_MAGIC;
11981 /* Override clock, link aware and link idle modes */
11982 if (tg3_flag(tp, CPMU_PRESENT)) {
11983 cpmu_val = tr32(TG3_CPMU_CTRL);
11984 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11985 CPMU_CTRL_LINK_IDLE_MODE)) {
11986 tw32(TG3_CPMU_CTRL, cpmu_val &
11987 ~(CPMU_CTRL_LINK_AWARE_MODE |
11988 CPMU_CTRL_LINK_IDLE_MODE));
11992 tg3_override_clk(tp);
11995 /* adjustments to start on required 4 byte boundary */
11996 b_offset = offset & 3;
11997 b_count = 4 - b_offset;
11998 if (b_count > len) {
11999 /* i.e. offset=1 len=2 */
12002 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12005 memcpy(data, ((char *)&val) + b_offset, b_count);
12008 eeprom->len += b_count;
12011 /* read bytes up to the last 4 byte boundary */
12012 pd = &data[eeprom->len];
12013 for (i = 0; i < (len - (len & 3)); i += 4) {
12014 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12021 memcpy(pd + i, &val, 4);
12022 if (need_resched()) {
12023 if (signal_pending(current)) {
12034 /* read last bytes not ending on 4 byte boundary */
12035 pd = &data[eeprom->len];
12037 b_offset = offset + len - b_count;
12038 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12041 memcpy(pd, &val, b_count);
12042 eeprom->len += b_count;
12047 /* Restore clock, link aware and link idle modes */
12048 tg3_restore_clk(tp);
12050 tw32(TG3_CPMU_CTRL, cpmu_val);
12055 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12057 struct tg3 *tp = netdev_priv(dev);
12059 u32 offset, len, b_offset, odd_len;
12061 __be32 start = 0, end;
12063 if (tg3_flag(tp, NO_NVRAM) ||
12064 eeprom->magic != TG3_EEPROM_MAGIC)
12067 offset = eeprom->offset;
12070 if ((b_offset = (offset & 3))) {
12071 /* adjustments to start on required 4 byte boundary */
12072 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12083 /* adjustments to end on required 4 byte boundary */
12085 len = (len + 3) & ~3;
12086 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12092 if (b_offset || odd_len) {
12093 buf = kmalloc(len, GFP_KERNEL);
12097 memcpy(buf, &start, 4);
12099 memcpy(buf+len-4, &end, 4);
12100 memcpy(buf + b_offset, data, eeprom->len);
12103 ret = tg3_nvram_write_block(tp, offset, len, buf);
12111 static int tg3_get_link_ksettings(struct net_device *dev,
12112 struct ethtool_link_ksettings *cmd)
12114 struct tg3 *tp = netdev_priv(dev);
12115 u32 supported, advertising;
12117 if (tg3_flag(tp, USE_PHYLIB)) {
12118 struct phy_device *phydev;
12119 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12121 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12122 phy_ethtool_ksettings_get(phydev, cmd);
12127 supported = (SUPPORTED_Autoneg);
12129 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12130 supported |= (SUPPORTED_1000baseT_Half |
12131 SUPPORTED_1000baseT_Full);
12133 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12134 supported |= (SUPPORTED_100baseT_Half |
12135 SUPPORTED_100baseT_Full |
12136 SUPPORTED_10baseT_Half |
12137 SUPPORTED_10baseT_Full |
12139 cmd->base.port = PORT_TP;
12141 supported |= SUPPORTED_FIBRE;
12142 cmd->base.port = PORT_FIBRE;
12144 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12147 advertising = tp->link_config.advertising;
12148 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12149 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12150 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12151 advertising |= ADVERTISED_Pause;
12153 advertising |= ADVERTISED_Pause |
12154 ADVERTISED_Asym_Pause;
12156 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12157 advertising |= ADVERTISED_Asym_Pause;
12160 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12163 if (netif_running(dev) && tp->link_up) {
12164 cmd->base.speed = tp->link_config.active_speed;
12165 cmd->base.duplex = tp->link_config.active_duplex;
12166 ethtool_convert_legacy_u32_to_link_mode(
12167 cmd->link_modes.lp_advertising,
12168 tp->link_config.rmt_adv);
12170 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12171 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12172 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12174 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12177 cmd->base.speed = SPEED_UNKNOWN;
12178 cmd->base.duplex = DUPLEX_UNKNOWN;
12179 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12181 cmd->base.phy_address = tp->phy_addr;
12182 cmd->base.autoneg = tp->link_config.autoneg;
12186 static int tg3_set_link_ksettings(struct net_device *dev,
12187 const struct ethtool_link_ksettings *cmd)
12189 struct tg3 *tp = netdev_priv(dev);
12190 u32 speed = cmd->base.speed;
12193 if (tg3_flag(tp, USE_PHYLIB)) {
12194 struct phy_device *phydev;
12195 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12197 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12198 return phy_ethtool_ksettings_set(phydev, cmd);
12201 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12202 cmd->base.autoneg != AUTONEG_DISABLE)
12205 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12206 cmd->base.duplex != DUPLEX_FULL &&
12207 cmd->base.duplex != DUPLEX_HALF)
12210 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12211 cmd->link_modes.advertising);
12213 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12214 u32 mask = ADVERTISED_Autoneg |
12216 ADVERTISED_Asym_Pause;
12218 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12219 mask |= ADVERTISED_1000baseT_Half |
12220 ADVERTISED_1000baseT_Full;
12222 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12223 mask |= ADVERTISED_100baseT_Half |
12224 ADVERTISED_100baseT_Full |
12225 ADVERTISED_10baseT_Half |
12226 ADVERTISED_10baseT_Full |
12229 mask |= ADVERTISED_FIBRE;
12231 if (advertising & ~mask)
12234 mask &= (ADVERTISED_1000baseT_Half |
12235 ADVERTISED_1000baseT_Full |
12236 ADVERTISED_100baseT_Half |
12237 ADVERTISED_100baseT_Full |
12238 ADVERTISED_10baseT_Half |
12239 ADVERTISED_10baseT_Full);
12241 advertising &= mask;
12243 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12244 if (speed != SPEED_1000)
12247 if (cmd->base.duplex != DUPLEX_FULL)
12250 if (speed != SPEED_100 &&
12256 tg3_full_lock(tp, 0);
12258 tp->link_config.autoneg = cmd->base.autoneg;
12259 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12260 tp->link_config.advertising = (advertising |
12261 ADVERTISED_Autoneg);
12262 tp->link_config.speed = SPEED_UNKNOWN;
12263 tp->link_config.duplex = DUPLEX_UNKNOWN;
12265 tp->link_config.advertising = 0;
12266 tp->link_config.speed = speed;
12267 tp->link_config.duplex = cmd->base.duplex;
12270 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12272 tg3_warn_mgmt_link_flap(tp);
12274 if (netif_running(dev))
12275 tg3_setup_phy(tp, true);
12277 tg3_full_unlock(tp);
12282 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12284 struct tg3 *tp = netdev_priv(dev);
12286 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12287 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12288 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12289 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12292 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12294 struct tg3 *tp = netdev_priv(dev);
12296 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12297 wol->supported = WAKE_MAGIC;
12299 wol->supported = 0;
12301 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12302 wol->wolopts = WAKE_MAGIC;
12303 memset(&wol->sopass, 0, sizeof(wol->sopass));
12306 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12308 struct tg3 *tp = netdev_priv(dev);
12309 struct device *dp = &tp->pdev->dev;
12311 if (wol->wolopts & ~WAKE_MAGIC)
12313 if ((wol->wolopts & WAKE_MAGIC) &&
12314 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12317 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12319 if (device_may_wakeup(dp))
12320 tg3_flag_set(tp, WOL_ENABLE);
12322 tg3_flag_clear(tp, WOL_ENABLE);
12327 static u32 tg3_get_msglevel(struct net_device *dev)
12329 struct tg3 *tp = netdev_priv(dev);
12330 return tp->msg_enable;
12333 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12335 struct tg3 *tp = netdev_priv(dev);
12336 tp->msg_enable = value;
12339 static int tg3_nway_reset(struct net_device *dev)
12341 struct tg3 *tp = netdev_priv(dev);
12344 if (!netif_running(dev))
12347 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12350 tg3_warn_mgmt_link_flap(tp);
12352 if (tg3_flag(tp, USE_PHYLIB)) {
12353 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12355 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12359 spin_lock_bh(&tp->lock);
12361 tg3_readphy(tp, MII_BMCR, &bmcr);
12362 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12363 ((bmcr & BMCR_ANENABLE) ||
12364 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12365 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12369 spin_unlock_bh(&tp->lock);
12375 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12377 struct tg3 *tp = netdev_priv(dev);
12379 ering->rx_max_pending = tp->rx_std_ring_mask;
12380 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12381 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12383 ering->rx_jumbo_max_pending = 0;
12385 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12387 ering->rx_pending = tp->rx_pending;
12388 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12389 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12391 ering->rx_jumbo_pending = 0;
12393 ering->tx_pending = tp->napi[0].tx_pending;
12396 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12398 struct tg3 *tp = netdev_priv(dev);
12399 int i, irq_sync = 0, err = 0;
12401 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12402 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12403 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12404 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12405 (tg3_flag(tp, TSO_BUG) &&
12406 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12409 if (netif_running(dev)) {
12411 tg3_netif_stop(tp);
12415 tg3_full_lock(tp, irq_sync);
12417 tp->rx_pending = ering->rx_pending;
12419 if (tg3_flag(tp, MAX_RXPEND_64) &&
12420 tp->rx_pending > 63)
12421 tp->rx_pending = 63;
12423 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12424 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12426 for (i = 0; i < tp->irq_max; i++)
12427 tp->napi[i].tx_pending = ering->tx_pending;
12429 if (netif_running(dev)) {
12430 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12431 err = tg3_restart_hw(tp, false);
12433 tg3_netif_start(tp);
12436 tg3_full_unlock(tp);
12438 if (irq_sync && !err)
12444 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12446 struct tg3 *tp = netdev_priv(dev);
12448 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12450 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12451 epause->rx_pause = 1;
12453 epause->rx_pause = 0;
12455 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12456 epause->tx_pause = 1;
12458 epause->tx_pause = 0;
12461 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12463 struct tg3 *tp = netdev_priv(dev);
12466 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12467 tg3_warn_mgmt_link_flap(tp);
12469 if (tg3_flag(tp, USE_PHYLIB)) {
12471 struct phy_device *phydev;
12473 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12475 if (!(phydev->supported & SUPPORTED_Pause) ||
12476 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12477 (epause->rx_pause != epause->tx_pause)))
12480 tp->link_config.flowctrl = 0;
12481 if (epause->rx_pause) {
12482 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12484 if (epause->tx_pause) {
12485 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12486 newadv = ADVERTISED_Pause;
12488 newadv = ADVERTISED_Pause |
12489 ADVERTISED_Asym_Pause;
12490 } else if (epause->tx_pause) {
12491 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12492 newadv = ADVERTISED_Asym_Pause;
12496 if (epause->autoneg)
12497 tg3_flag_set(tp, PAUSE_AUTONEG);
12499 tg3_flag_clear(tp, PAUSE_AUTONEG);
12501 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12502 u32 oldadv = phydev->advertising &
12503 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12504 if (oldadv != newadv) {
12505 phydev->advertising &=
12506 ~(ADVERTISED_Pause |
12507 ADVERTISED_Asym_Pause);
12508 phydev->advertising |= newadv;
12509 if (phydev->autoneg) {
12511 * Always renegotiate the link to
12512 * inform our link partner of our
12513 * flow control settings, even if the
12514 * flow control is forced. Let
12515 * tg3_adjust_link() do the final
12516 * flow control setup.
12518 return phy_start_aneg(phydev);
12522 if (!epause->autoneg)
12523 tg3_setup_flow_control(tp, 0, 0);
12525 tp->link_config.advertising &=
12526 ~(ADVERTISED_Pause |
12527 ADVERTISED_Asym_Pause);
12528 tp->link_config.advertising |= newadv;
12533 if (netif_running(dev)) {
12534 tg3_netif_stop(tp);
12538 tg3_full_lock(tp, irq_sync);
12540 if (epause->autoneg)
12541 tg3_flag_set(tp, PAUSE_AUTONEG);
12543 tg3_flag_clear(tp, PAUSE_AUTONEG);
12544 if (epause->rx_pause)
12545 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12547 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12548 if (epause->tx_pause)
12549 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12551 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12553 if (netif_running(dev)) {
12554 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12555 err = tg3_restart_hw(tp, false);
12557 tg3_netif_start(tp);
12560 tg3_full_unlock(tp);
12563 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12568 static int tg3_get_sset_count(struct net_device *dev, int sset)
12572 return TG3_NUM_TEST;
12574 return TG3_NUM_STATS;
12576 return -EOPNOTSUPP;
12580 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12581 u32 *rules __always_unused)
12583 struct tg3 *tp = netdev_priv(dev);
12585 if (!tg3_flag(tp, SUPPORT_MSIX))
12586 return -EOPNOTSUPP;
12588 switch (info->cmd) {
12589 case ETHTOOL_GRXRINGS:
12590 if (netif_running(tp->dev))
12591 info->data = tp->rxq_cnt;
12593 info->data = num_online_cpus();
12594 if (info->data > TG3_RSS_MAX_NUM_QS)
12595 info->data = TG3_RSS_MAX_NUM_QS;
12601 return -EOPNOTSUPP;
12605 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12608 struct tg3 *tp = netdev_priv(dev);
12610 if (tg3_flag(tp, SUPPORT_MSIX))
12611 size = TG3_RSS_INDIR_TBL_SIZE;
12616 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12618 struct tg3 *tp = netdev_priv(dev);
12622 *hfunc = ETH_RSS_HASH_TOP;
12626 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12627 indir[i] = tp->rss_ind_tbl[i];
12632 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12635 struct tg3 *tp = netdev_priv(dev);
12638 /* We require at least one supported parameter to be changed and no
12639 * change in any of the unsupported parameters
12642 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12643 return -EOPNOTSUPP;
12648 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12649 tp->rss_ind_tbl[i] = indir[i];
12651 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12654 /* It is legal to write the indirection
12655 * table while the device is running.
12657 tg3_full_lock(tp, 0);
12658 tg3_rss_write_indir_tbl(tp);
12659 tg3_full_unlock(tp);
12664 static void tg3_get_channels(struct net_device *dev,
12665 struct ethtool_channels *channel)
12667 struct tg3 *tp = netdev_priv(dev);
12668 u32 deflt_qs = netif_get_num_default_rss_queues();
12670 channel->max_rx = tp->rxq_max;
12671 channel->max_tx = tp->txq_max;
12673 if (netif_running(dev)) {
12674 channel->rx_count = tp->rxq_cnt;
12675 channel->tx_count = tp->txq_cnt;
12678 channel->rx_count = tp->rxq_req;
12680 channel->rx_count = min(deflt_qs, tp->rxq_max);
12683 channel->tx_count = tp->txq_req;
12685 channel->tx_count = min(deflt_qs, tp->txq_max);
12689 static int tg3_set_channels(struct net_device *dev,
12690 struct ethtool_channels *channel)
12692 struct tg3 *tp = netdev_priv(dev);
12694 if (!tg3_flag(tp, SUPPORT_MSIX))
12695 return -EOPNOTSUPP;
12697 if (channel->rx_count > tp->rxq_max ||
12698 channel->tx_count > tp->txq_max)
12701 tp->rxq_req = channel->rx_count;
12702 tp->txq_req = channel->tx_count;
12704 if (!netif_running(dev))
12709 tg3_carrier_off(tp);
12711 tg3_start(tp, true, false, false);
12716 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12718 switch (stringset) {
12720 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
12723 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
12726 WARN_ON(1); /* we need a WARN() */
12731 static int tg3_set_phys_id(struct net_device *dev,
12732 enum ethtool_phys_id_state state)
12734 struct tg3 *tp = netdev_priv(dev);
12736 if (!netif_running(tp->dev))
12740 case ETHTOOL_ID_ACTIVE:
12741 return 1; /* cycle on/off once per second */
12743 case ETHTOOL_ID_ON:
12744 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12745 LED_CTRL_1000MBPS_ON |
12746 LED_CTRL_100MBPS_ON |
12747 LED_CTRL_10MBPS_ON |
12748 LED_CTRL_TRAFFIC_OVERRIDE |
12749 LED_CTRL_TRAFFIC_BLINK |
12750 LED_CTRL_TRAFFIC_LED);
12753 case ETHTOOL_ID_OFF:
12754 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12755 LED_CTRL_TRAFFIC_OVERRIDE);
12758 case ETHTOOL_ID_INACTIVE:
12759 tw32(MAC_LED_CTRL, tp->led_ctrl);
12766 static void tg3_get_ethtool_stats(struct net_device *dev,
12767 struct ethtool_stats *estats, u64 *tmp_stats)
12769 struct tg3 *tp = netdev_priv(dev);
12772 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12774 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12777 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12781 u32 offset = 0, len = 0;
12784 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12787 if (magic == TG3_EEPROM_MAGIC) {
12788 for (offset = TG3_NVM_DIR_START;
12789 offset < TG3_NVM_DIR_END;
12790 offset += TG3_NVM_DIRENT_SIZE) {
12791 if (tg3_nvram_read(tp, offset, &val))
12794 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12795 TG3_NVM_DIRTYPE_EXTVPD)
12799 if (offset != TG3_NVM_DIR_END) {
12800 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12801 if (tg3_nvram_read(tp, offset + 4, &offset))
12804 offset = tg3_nvram_logical_addr(tp, offset);
12808 if (!offset || !len) {
12809 offset = TG3_NVM_VPD_OFF;
12810 len = TG3_NVM_VPD_LEN;
12813 buf = kmalloc(len, GFP_KERNEL);
12817 if (magic == TG3_EEPROM_MAGIC) {
12818 for (i = 0; i < len; i += 4) {
12819 /* The data is in little-endian format in NVRAM.
12820 * Use the big-endian read routines to preserve
12821 * the byte order as it exists in NVRAM.
12823 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12829 unsigned int pos = 0;
12831 ptr = (u8 *)&buf[0];
12832 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12833 cnt = pci_read_vpd(tp->pdev, pos,
12835 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12853 #define NVRAM_TEST_SIZE 0x100
12854 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12855 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12856 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12857 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12858 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12859 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12860 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12861 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12863 static int tg3_test_nvram(struct tg3 *tp)
12865 u32 csum, magic, len;
12867 int i, j, k, err = 0, size;
12869 if (tg3_flag(tp, NO_NVRAM))
12872 if (tg3_nvram_read(tp, 0, &magic) != 0)
12875 if (magic == TG3_EEPROM_MAGIC)
12876 size = NVRAM_TEST_SIZE;
12877 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12878 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12879 TG3_EEPROM_SB_FORMAT_1) {
12880 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12881 case TG3_EEPROM_SB_REVISION_0:
12882 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12884 case TG3_EEPROM_SB_REVISION_2:
12885 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12887 case TG3_EEPROM_SB_REVISION_3:
12888 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12890 case TG3_EEPROM_SB_REVISION_4:
12891 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12893 case TG3_EEPROM_SB_REVISION_5:
12894 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12896 case TG3_EEPROM_SB_REVISION_6:
12897 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12904 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12905 size = NVRAM_SELFBOOT_HW_SIZE;
12909 buf = kmalloc(size, GFP_KERNEL);
12914 for (i = 0, j = 0; i < size; i += 4, j++) {
12915 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12922 /* Selfboot format */
12923 magic = be32_to_cpu(buf[0]);
12924 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12925 TG3_EEPROM_MAGIC_FW) {
12926 u8 *buf8 = (u8 *) buf, csum8 = 0;
12928 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12929 TG3_EEPROM_SB_REVISION_2) {
12930 /* For rev 2, the csum doesn't include the MBA. */
12931 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12933 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12936 for (i = 0; i < size; i++)
12949 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12950 TG3_EEPROM_MAGIC_HW) {
12951 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12952 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12953 u8 *buf8 = (u8 *) buf;
12955 /* Separate the parity bits and the data bytes. */
12956 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12957 if ((i == 0) || (i == 8)) {
12961 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12962 parity[k++] = buf8[i] & msk;
12964 } else if (i == 16) {
12968 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12969 parity[k++] = buf8[i] & msk;
12972 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12973 parity[k++] = buf8[i] & msk;
12976 data[j++] = buf8[i];
12980 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12981 u8 hw8 = hweight8(data[i]);
12983 if ((hw8 & 0x1) && parity[i])
12985 else if (!(hw8 & 0x1) && !parity[i])
12994 /* Bootstrap checksum at offset 0x10 */
12995 csum = calc_crc((unsigned char *) buf, 0x10);
12996 if (csum != le32_to_cpu(buf[0x10/4]))
12999 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13000 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13001 if (csum != le32_to_cpu(buf[0xfc/4]))
13006 buf = tg3_vpd_readblock(tp, &len);
13010 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13012 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13016 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13019 i += PCI_VPD_LRDT_TAG_SIZE;
13020 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13021 PCI_VPD_RO_KEYWORD_CHKSUM);
13025 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13027 for (i = 0; i <= j; i++)
13028 csum8 += ((u8 *)buf)[i];
13042 #define TG3_SERDES_TIMEOUT_SEC 2
13043 #define TG3_COPPER_TIMEOUT_SEC 6
13045 static int tg3_test_link(struct tg3 *tp)
13049 if (!netif_running(tp->dev))
13052 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13053 max = TG3_SERDES_TIMEOUT_SEC;
13055 max = TG3_COPPER_TIMEOUT_SEC;
13057 for (i = 0; i < max; i++) {
13061 if (msleep_interruptible(1000))
13068 /* Only test the commonly used registers */
13069 static int tg3_test_registers(struct tg3 *tp)
13071 int i, is_5705, is_5750;
13072 u32 offset, read_mask, write_mask, val, save_val, read_val;
13076 #define TG3_FL_5705 0x1
13077 #define TG3_FL_NOT_5705 0x2
13078 #define TG3_FL_NOT_5788 0x4
13079 #define TG3_FL_NOT_5750 0x8
13083 /* MAC Control Registers */
13084 { MAC_MODE, TG3_FL_NOT_5705,
13085 0x00000000, 0x00ef6f8c },
13086 { MAC_MODE, TG3_FL_5705,
13087 0x00000000, 0x01ef6b8c },
13088 { MAC_STATUS, TG3_FL_NOT_5705,
13089 0x03800107, 0x00000000 },
13090 { MAC_STATUS, TG3_FL_5705,
13091 0x03800100, 0x00000000 },
13092 { MAC_ADDR_0_HIGH, 0x0000,
13093 0x00000000, 0x0000ffff },
13094 { MAC_ADDR_0_LOW, 0x0000,
13095 0x00000000, 0xffffffff },
13096 { MAC_RX_MTU_SIZE, 0x0000,
13097 0x00000000, 0x0000ffff },
13098 { MAC_TX_MODE, 0x0000,
13099 0x00000000, 0x00000070 },
13100 { MAC_TX_LENGTHS, 0x0000,
13101 0x00000000, 0x00003fff },
13102 { MAC_RX_MODE, TG3_FL_NOT_5705,
13103 0x00000000, 0x000007fc },
13104 { MAC_RX_MODE, TG3_FL_5705,
13105 0x00000000, 0x000007dc },
13106 { MAC_HASH_REG_0, 0x0000,
13107 0x00000000, 0xffffffff },
13108 { MAC_HASH_REG_1, 0x0000,
13109 0x00000000, 0xffffffff },
13110 { MAC_HASH_REG_2, 0x0000,
13111 0x00000000, 0xffffffff },
13112 { MAC_HASH_REG_3, 0x0000,
13113 0x00000000, 0xffffffff },
13115 /* Receive Data and Receive BD Initiator Control Registers. */
13116 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13117 0x00000000, 0xffffffff },
13118 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13119 0x00000000, 0xffffffff },
13120 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13121 0x00000000, 0x00000003 },
13122 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13123 0x00000000, 0xffffffff },
13124 { RCVDBDI_STD_BD+0, 0x0000,
13125 0x00000000, 0xffffffff },
13126 { RCVDBDI_STD_BD+4, 0x0000,
13127 0x00000000, 0xffffffff },
13128 { RCVDBDI_STD_BD+8, 0x0000,
13129 0x00000000, 0xffff0002 },
13130 { RCVDBDI_STD_BD+0xc, 0x0000,
13131 0x00000000, 0xffffffff },
13133 /* Receive BD Initiator Control Registers. */
13134 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13135 0x00000000, 0xffffffff },
13136 { RCVBDI_STD_THRESH, TG3_FL_5705,
13137 0x00000000, 0x000003ff },
13138 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13139 0x00000000, 0xffffffff },
13141 /* Host Coalescing Control Registers. */
13142 { HOSTCC_MODE, TG3_FL_NOT_5705,
13143 0x00000000, 0x00000004 },
13144 { HOSTCC_MODE, TG3_FL_5705,
13145 0x00000000, 0x000000f6 },
13146 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13147 0x00000000, 0xffffffff },
13148 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13149 0x00000000, 0x000003ff },
13150 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13151 0x00000000, 0xffffffff },
13152 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13153 0x00000000, 0x000003ff },
13154 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13155 0x00000000, 0xffffffff },
13156 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13157 0x00000000, 0x000000ff },
13158 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13159 0x00000000, 0xffffffff },
13160 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13161 0x00000000, 0x000000ff },
13162 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13163 0x00000000, 0xffffffff },
13164 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13165 0x00000000, 0xffffffff },
13166 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13167 0x00000000, 0xffffffff },
13168 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13169 0x00000000, 0x000000ff },
13170 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13171 0x00000000, 0xffffffff },
13172 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13173 0x00000000, 0x000000ff },
13174 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13175 0x00000000, 0xffffffff },
13176 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13177 0x00000000, 0xffffffff },
13178 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13179 0x00000000, 0xffffffff },
13180 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13181 0x00000000, 0xffffffff },
13182 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13183 0x00000000, 0xffffffff },
13184 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13185 0xffffffff, 0x00000000 },
13186 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13187 0xffffffff, 0x00000000 },
13189 /* Buffer Manager Control Registers. */
13190 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13191 0x00000000, 0x007fff80 },
13192 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13193 0x00000000, 0x007fffff },
13194 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13195 0x00000000, 0x0000003f },
13196 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13197 0x00000000, 0x000001ff },
13198 { BUFMGR_MB_HIGH_WATER, 0x0000,
13199 0x00000000, 0x000001ff },
13200 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13201 0xffffffff, 0x00000000 },
13202 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13203 0xffffffff, 0x00000000 },
13205 /* Mailbox Registers */
13206 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13207 0x00000000, 0x000001ff },
13208 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13209 0x00000000, 0x000001ff },
13210 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13211 0x00000000, 0x000007ff },
13212 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13213 0x00000000, 0x000001ff },
13215 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13218 is_5705 = is_5750 = 0;
13219 if (tg3_flag(tp, 5705_PLUS)) {
13221 if (tg3_flag(tp, 5750_PLUS))
13225 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13226 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13229 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13232 if (tg3_flag(tp, IS_5788) &&
13233 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13236 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13239 offset = (u32) reg_tbl[i].offset;
13240 read_mask = reg_tbl[i].read_mask;
13241 write_mask = reg_tbl[i].write_mask;
13243 /* Save the original register content */
13244 save_val = tr32(offset);
13246 /* Determine the read-only value. */
13247 read_val = save_val & read_mask;
13249 /* Write zero to the register, then make sure the read-only bits
13250 * are not changed and the read/write bits are all zeros.
13254 val = tr32(offset);
13256 /* Test the read-only and read/write bits. */
13257 if (((val & read_mask) != read_val) || (val & write_mask))
13260 /* Write ones to all the bits defined by RdMask and WrMask, then
13261 * make sure the read-only bits are not changed and the
13262 * read/write bits are all ones.
13264 tw32(offset, read_mask | write_mask);
13266 val = tr32(offset);
13268 /* Test the read-only bits. */
13269 if ((val & read_mask) != read_val)
13272 /* Test the read/write bits. */
13273 if ((val & write_mask) != write_mask)
13276 tw32(offset, save_val);
13282 if (netif_msg_hw(tp))
13283 netdev_err(tp->dev,
13284 "Register test failed at offset %x\n", offset);
13285 tw32(offset, save_val);
13289 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13291 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13295 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13296 for (j = 0; j < len; j += 4) {
13299 tg3_write_mem(tp, offset + j, test_pattern[i]);
13300 tg3_read_mem(tp, offset + j, &val);
13301 if (val != test_pattern[i])
13308 static int tg3_test_memory(struct tg3 *tp)
13310 static struct mem_entry {
13313 } mem_tbl_570x[] = {
13314 { 0x00000000, 0x00b50},
13315 { 0x00002000, 0x1c000},
13316 { 0xffffffff, 0x00000}
13317 }, mem_tbl_5705[] = {
13318 { 0x00000100, 0x0000c},
13319 { 0x00000200, 0x00008},
13320 { 0x00004000, 0x00800},
13321 { 0x00006000, 0x01000},
13322 { 0x00008000, 0x02000},
13323 { 0x00010000, 0x0e000},
13324 { 0xffffffff, 0x00000}
13325 }, mem_tbl_5755[] = {
13326 { 0x00000200, 0x00008},
13327 { 0x00004000, 0x00800},
13328 { 0x00006000, 0x00800},
13329 { 0x00008000, 0x02000},
13330 { 0x00010000, 0x0c000},
13331 { 0xffffffff, 0x00000}
13332 }, mem_tbl_5906[] = {
13333 { 0x00000200, 0x00008},
13334 { 0x00004000, 0x00400},
13335 { 0x00006000, 0x00400},
13336 { 0x00008000, 0x01000},
13337 { 0x00010000, 0x01000},
13338 { 0xffffffff, 0x00000}
13339 }, mem_tbl_5717[] = {
13340 { 0x00000200, 0x00008},
13341 { 0x00010000, 0x0a000},
13342 { 0x00020000, 0x13c00},
13343 { 0xffffffff, 0x00000}
13344 }, mem_tbl_57765[] = {
13345 { 0x00000200, 0x00008},
13346 { 0x00004000, 0x00800},
13347 { 0x00006000, 0x09800},
13348 { 0x00010000, 0x0a000},
13349 { 0xffffffff, 0x00000}
13351 struct mem_entry *mem_tbl;
13355 if (tg3_flag(tp, 5717_PLUS))
13356 mem_tbl = mem_tbl_5717;
13357 else if (tg3_flag(tp, 57765_CLASS) ||
13358 tg3_asic_rev(tp) == ASIC_REV_5762)
13359 mem_tbl = mem_tbl_57765;
13360 else if (tg3_flag(tp, 5755_PLUS))
13361 mem_tbl = mem_tbl_5755;
13362 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13363 mem_tbl = mem_tbl_5906;
13364 else if (tg3_flag(tp, 5705_PLUS))
13365 mem_tbl = mem_tbl_5705;
13367 mem_tbl = mem_tbl_570x;
13369 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13370 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13378 #define TG3_TSO_MSS 500
13380 #define TG3_TSO_IP_HDR_LEN 20
13381 #define TG3_TSO_TCP_HDR_LEN 20
13382 #define TG3_TSO_TCP_OPT_LEN 12
13384 static const u8 tg3_tso_header[] = {
13386 0x45, 0x00, 0x00, 0x00,
13387 0x00, 0x00, 0x40, 0x00,
13388 0x40, 0x06, 0x00, 0x00,
13389 0x0a, 0x00, 0x00, 0x01,
13390 0x0a, 0x00, 0x00, 0x02,
13391 0x0d, 0x00, 0xe0, 0x00,
13392 0x00, 0x00, 0x01, 0x00,
13393 0x00, 0x00, 0x02, 0x00,
13394 0x80, 0x10, 0x10, 0x00,
13395 0x14, 0x09, 0x00, 0x00,
13396 0x01, 0x01, 0x08, 0x0a,
13397 0x11, 0x11, 0x11, 0x11,
13398 0x11, 0x11, 0x11, 0x11,
13401 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13403 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13404 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13406 struct sk_buff *skb;
13407 u8 *tx_data, *rx_data;
13409 int num_pkts, tx_len, rx_len, i, err;
13410 struct tg3_rx_buffer_desc *desc;
13411 struct tg3_napi *tnapi, *rnapi;
13412 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13414 tnapi = &tp->napi[0];
13415 rnapi = &tp->napi[0];
13416 if (tp->irq_cnt > 1) {
13417 if (tg3_flag(tp, ENABLE_RSS))
13418 rnapi = &tp->napi[1];
13419 if (tg3_flag(tp, ENABLE_TSS))
13420 tnapi = &tp->napi[1];
13422 coal_now = tnapi->coal_now | rnapi->coal_now;
13427 skb = netdev_alloc_skb(tp->dev, tx_len);
13431 tx_data = skb_put(skb, tx_len);
13432 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13433 memset(tx_data + ETH_ALEN, 0x0, 8);
13435 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13437 if (tso_loopback) {
13438 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13440 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13441 TG3_TSO_TCP_OPT_LEN;
13443 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13444 sizeof(tg3_tso_header));
13447 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13448 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13450 /* Set the total length field in the IP header */
13451 iph->tot_len = htons((u16)(mss + hdr_len));
13453 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13454 TXD_FLAG_CPU_POST_DMA);
13456 if (tg3_flag(tp, HW_TSO_1) ||
13457 tg3_flag(tp, HW_TSO_2) ||
13458 tg3_flag(tp, HW_TSO_3)) {
13460 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13461 th = (struct tcphdr *)&tx_data[val];
13464 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13466 if (tg3_flag(tp, HW_TSO_3)) {
13467 mss |= (hdr_len & 0xc) << 12;
13468 if (hdr_len & 0x10)
13469 base_flags |= 0x00000010;
13470 base_flags |= (hdr_len & 0x3e0) << 5;
13471 } else if (tg3_flag(tp, HW_TSO_2))
13472 mss |= hdr_len << 9;
13473 else if (tg3_flag(tp, HW_TSO_1) ||
13474 tg3_asic_rev(tp) == ASIC_REV_5705) {
13475 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13477 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13480 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13483 data_off = ETH_HLEN;
13485 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13486 tx_len > VLAN_ETH_FRAME_LEN)
13487 base_flags |= TXD_FLAG_JMB_PKT;
13490 for (i = data_off; i < tx_len; i++)
13491 tx_data[i] = (u8) (i & 0xff);
13493 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13494 if (pci_dma_mapping_error(tp->pdev, map)) {
13495 dev_kfree_skb(skb);
13499 val = tnapi->tx_prod;
13500 tnapi->tx_buffers[val].skb = skb;
13501 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13503 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13508 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13510 budget = tg3_tx_avail(tnapi);
13511 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13512 base_flags | TXD_FLAG_END, mss, 0)) {
13513 tnapi->tx_buffers[val].skb = NULL;
13514 dev_kfree_skb(skb);
13520 /* Sync BD data before updating mailbox */
13523 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13524 tr32_mailbox(tnapi->prodmbox);
13528 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13529 for (i = 0; i < 35; i++) {
13530 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13535 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13536 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13537 if ((tx_idx == tnapi->tx_prod) &&
13538 (rx_idx == (rx_start_idx + num_pkts)))
13542 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13543 dev_kfree_skb(skb);
13545 if (tx_idx != tnapi->tx_prod)
13548 if (rx_idx != rx_start_idx + num_pkts)
13552 while (rx_idx != rx_start_idx) {
13553 desc = &rnapi->rx_rcb[rx_start_idx++];
13554 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13555 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13557 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13558 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13561 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13564 if (!tso_loopback) {
13565 if (rx_len != tx_len)
13568 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13569 if (opaque_key != RXD_OPAQUE_RING_STD)
13572 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13575 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13576 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13577 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13581 if (opaque_key == RXD_OPAQUE_RING_STD) {
13582 rx_data = tpr->rx_std_buffers[desc_idx].data;
13583 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13585 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13586 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13587 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13592 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13593 PCI_DMA_FROMDEVICE);
13595 rx_data += TG3_RX_OFFSET(tp);
13596 for (i = data_off; i < rx_len; i++, val++) {
13597 if (*(rx_data + i) != (u8) (val & 0xff))
13604 /* tg3_free_rings will unmap and free the rx_data */
13609 #define TG3_STD_LOOPBACK_FAILED 1
13610 #define TG3_JMB_LOOPBACK_FAILED 2
13611 #define TG3_TSO_LOOPBACK_FAILED 4
13612 #define TG3_LOOPBACK_FAILED \
13613 (TG3_STD_LOOPBACK_FAILED | \
13614 TG3_JMB_LOOPBACK_FAILED | \
13615 TG3_TSO_LOOPBACK_FAILED)
13617 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13621 u32 jmb_pkt_sz = 9000;
13624 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13626 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13627 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13629 if (!netif_running(tp->dev)) {
13630 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13631 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13633 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13637 err = tg3_reset_hw(tp, true);
13639 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13640 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13642 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13646 if (tg3_flag(tp, ENABLE_RSS)) {
13649 /* Reroute all rx packets to the 1st queue */
13650 for (i = MAC_RSS_INDIR_TBL_0;
13651 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13655 /* HW errata - mac loopback fails in some cases on 5780.
13656 * Normal traffic and PHY loopback are not affected by
13657 * errata. Also, the MAC loopback test is deprecated for
13658 * all newer ASIC revisions.
13660 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13661 !tg3_flag(tp, CPMU_PRESENT)) {
13662 tg3_mac_loopback(tp, true);
13664 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13665 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13667 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13668 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13669 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13671 tg3_mac_loopback(tp, false);
13674 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13675 !tg3_flag(tp, USE_PHYLIB)) {
13678 tg3_phy_lpbk_set(tp, 0, false);
13680 /* Wait for link */
13681 for (i = 0; i < 100; i++) {
13682 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13687 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13688 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13689 if (tg3_flag(tp, TSO_CAPABLE) &&
13690 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13691 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13692 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13693 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13694 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13697 tg3_phy_lpbk_set(tp, 0, true);
13699 /* All link indications report up, but the hardware
13700 * isn't really ready for about 20 msec. Double it
13705 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13706 data[TG3_EXT_LOOPB_TEST] |=
13707 TG3_STD_LOOPBACK_FAILED;
13708 if (tg3_flag(tp, TSO_CAPABLE) &&
13709 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13710 data[TG3_EXT_LOOPB_TEST] |=
13711 TG3_TSO_LOOPBACK_FAILED;
13712 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13713 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13714 data[TG3_EXT_LOOPB_TEST] |=
13715 TG3_JMB_LOOPBACK_FAILED;
13718 /* Re-enable gphy autopowerdown. */
13719 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13720 tg3_phy_toggle_apd(tp, true);
13723 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13724 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13727 tp->phy_flags |= eee_cap;
13732 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13735 struct tg3 *tp = netdev_priv(dev);
13736 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13738 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13739 if (tg3_power_up(tp)) {
13740 etest->flags |= ETH_TEST_FL_FAILED;
13741 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13744 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13747 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13749 if (tg3_test_nvram(tp) != 0) {
13750 etest->flags |= ETH_TEST_FL_FAILED;
13751 data[TG3_NVRAM_TEST] = 1;
13753 if (!doextlpbk && tg3_test_link(tp)) {
13754 etest->flags |= ETH_TEST_FL_FAILED;
13755 data[TG3_LINK_TEST] = 1;
13757 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13758 int err, err2 = 0, irq_sync = 0;
13760 if (netif_running(dev)) {
13762 tg3_netif_stop(tp);
13766 tg3_full_lock(tp, irq_sync);
13767 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13768 err = tg3_nvram_lock(tp);
13769 tg3_halt_cpu(tp, RX_CPU_BASE);
13770 if (!tg3_flag(tp, 5705_PLUS))
13771 tg3_halt_cpu(tp, TX_CPU_BASE);
13773 tg3_nvram_unlock(tp);
13775 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13778 if (tg3_test_registers(tp) != 0) {
13779 etest->flags |= ETH_TEST_FL_FAILED;
13780 data[TG3_REGISTER_TEST] = 1;
13783 if (tg3_test_memory(tp) != 0) {
13784 etest->flags |= ETH_TEST_FL_FAILED;
13785 data[TG3_MEMORY_TEST] = 1;
13789 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13791 if (tg3_test_loopback(tp, data, doextlpbk))
13792 etest->flags |= ETH_TEST_FL_FAILED;
13794 tg3_full_unlock(tp);
13796 if (tg3_test_interrupt(tp) != 0) {
13797 etest->flags |= ETH_TEST_FL_FAILED;
13798 data[TG3_INTERRUPT_TEST] = 1;
13801 tg3_full_lock(tp, 0);
13803 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13804 if (netif_running(dev)) {
13805 tg3_flag_set(tp, INIT_COMPLETE);
13806 err2 = tg3_restart_hw(tp, true);
13808 tg3_netif_start(tp);
13811 tg3_full_unlock(tp);
13813 if (irq_sync && !err2)
13816 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13817 tg3_power_down_prepare(tp);
13821 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13823 struct tg3 *tp = netdev_priv(dev);
13824 struct hwtstamp_config stmpconf;
13826 if (!tg3_flag(tp, PTP_CAPABLE))
13827 return -EOPNOTSUPP;
13829 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13832 if (stmpconf.flags)
13835 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13836 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13839 switch (stmpconf.rx_filter) {
13840 case HWTSTAMP_FILTER_NONE:
13843 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13844 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13845 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13847 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13848 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13849 TG3_RX_PTP_CTL_SYNC_EVNT;
13851 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13852 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13853 TG3_RX_PTP_CTL_DELAY_REQ;
13855 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13856 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13857 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13859 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13860 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13861 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13863 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13864 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13865 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13867 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13868 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13869 TG3_RX_PTP_CTL_SYNC_EVNT;
13871 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13872 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13873 TG3_RX_PTP_CTL_SYNC_EVNT;
13875 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13876 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13877 TG3_RX_PTP_CTL_SYNC_EVNT;
13879 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13880 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13881 TG3_RX_PTP_CTL_DELAY_REQ;
13883 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13884 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13885 TG3_RX_PTP_CTL_DELAY_REQ;
13887 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13888 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13889 TG3_RX_PTP_CTL_DELAY_REQ;
13895 if (netif_running(dev) && tp->rxptpctl)
13896 tw32(TG3_RX_PTP_CTL,
13897 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13899 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13900 tg3_flag_set(tp, TX_TSTAMP_EN);
13902 tg3_flag_clear(tp, TX_TSTAMP_EN);
13904 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13908 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13910 struct tg3 *tp = netdev_priv(dev);
13911 struct hwtstamp_config stmpconf;
13913 if (!tg3_flag(tp, PTP_CAPABLE))
13914 return -EOPNOTSUPP;
13916 stmpconf.flags = 0;
13917 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13918 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13920 switch (tp->rxptpctl) {
13922 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13924 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13925 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13927 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13928 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13930 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13931 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13933 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13934 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13936 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13937 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13939 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13940 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13942 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13943 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13945 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13946 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13948 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13949 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13951 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13952 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13954 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13955 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13957 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13958 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13965 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13969 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13971 struct mii_ioctl_data *data = if_mii(ifr);
13972 struct tg3 *tp = netdev_priv(dev);
13975 if (tg3_flag(tp, USE_PHYLIB)) {
13976 struct phy_device *phydev;
13977 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13979 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13980 return phy_mii_ioctl(phydev, ifr, cmd);
13985 data->phy_id = tp->phy_addr;
13988 case SIOCGMIIREG: {
13991 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13992 break; /* We have no PHY */
13994 if (!netif_running(dev))
13997 spin_lock_bh(&tp->lock);
13998 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13999 data->reg_num & 0x1f, &mii_regval);
14000 spin_unlock_bh(&tp->lock);
14002 data->val_out = mii_regval;
14008 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14009 break; /* We have no PHY */
14011 if (!netif_running(dev))
14014 spin_lock_bh(&tp->lock);
14015 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14016 data->reg_num & 0x1f, data->val_in);
14017 spin_unlock_bh(&tp->lock);
14021 case SIOCSHWTSTAMP:
14022 return tg3_hwtstamp_set(dev, ifr);
14024 case SIOCGHWTSTAMP:
14025 return tg3_hwtstamp_get(dev, ifr);
14031 return -EOPNOTSUPP;
14034 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14036 struct tg3 *tp = netdev_priv(dev);
14038 memcpy(ec, &tp->coal, sizeof(*ec));
14042 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14044 struct tg3 *tp = netdev_priv(dev);
14045 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14046 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14048 if (!tg3_flag(tp, 5705_PLUS)) {
14049 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14050 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14051 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14052 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14055 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14056 (!ec->rx_coalesce_usecs) ||
14057 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14058 (!ec->tx_coalesce_usecs) ||
14059 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14060 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14061 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14062 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14063 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14064 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14065 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14066 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14069 /* Only copy relevant parameters, ignore all others. */
14070 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14071 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14072 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14073 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14074 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14075 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14076 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14077 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14078 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14080 if (netif_running(dev)) {
14081 tg3_full_lock(tp, 0);
14082 __tg3_set_coalesce(tp, &tp->coal);
14083 tg3_full_unlock(tp);
14088 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14090 struct tg3 *tp = netdev_priv(dev);
14092 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14093 netdev_warn(tp->dev, "Board does not support EEE!\n");
14094 return -EOPNOTSUPP;
14097 if (edata->advertised != tp->eee.advertised) {
14098 netdev_warn(tp->dev,
14099 "Direct manipulation of EEE advertisement is not supported\n");
14103 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14104 netdev_warn(tp->dev,
14105 "Maximal Tx Lpi timer supported is %#x(u)\n",
14106 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14112 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14113 tg3_warn_mgmt_link_flap(tp);
14115 if (netif_running(tp->dev)) {
14116 tg3_full_lock(tp, 0);
14119 tg3_full_unlock(tp);
14125 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14127 struct tg3 *tp = netdev_priv(dev);
14129 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14130 netdev_warn(tp->dev,
14131 "Board does not support EEE!\n");
14132 return -EOPNOTSUPP;
14139 static const struct ethtool_ops tg3_ethtool_ops = {
14140 .get_drvinfo = tg3_get_drvinfo,
14141 .get_regs_len = tg3_get_regs_len,
14142 .get_regs = tg3_get_regs,
14143 .get_wol = tg3_get_wol,
14144 .set_wol = tg3_set_wol,
14145 .get_msglevel = tg3_get_msglevel,
14146 .set_msglevel = tg3_set_msglevel,
14147 .nway_reset = tg3_nway_reset,
14148 .get_link = ethtool_op_get_link,
14149 .get_eeprom_len = tg3_get_eeprom_len,
14150 .get_eeprom = tg3_get_eeprom,
14151 .set_eeprom = tg3_set_eeprom,
14152 .get_ringparam = tg3_get_ringparam,
14153 .set_ringparam = tg3_set_ringparam,
14154 .get_pauseparam = tg3_get_pauseparam,
14155 .set_pauseparam = tg3_set_pauseparam,
14156 .self_test = tg3_self_test,
14157 .get_strings = tg3_get_strings,
14158 .set_phys_id = tg3_set_phys_id,
14159 .get_ethtool_stats = tg3_get_ethtool_stats,
14160 .get_coalesce = tg3_get_coalesce,
14161 .set_coalesce = tg3_set_coalesce,
14162 .get_sset_count = tg3_get_sset_count,
14163 .get_rxnfc = tg3_get_rxnfc,
14164 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14165 .get_rxfh = tg3_get_rxfh,
14166 .set_rxfh = tg3_set_rxfh,
14167 .get_channels = tg3_get_channels,
14168 .set_channels = tg3_set_channels,
14169 .get_ts_info = tg3_get_ts_info,
14170 .get_eee = tg3_get_eee,
14171 .set_eee = tg3_set_eee,
14172 .get_link_ksettings = tg3_get_link_ksettings,
14173 .set_link_ksettings = tg3_set_link_ksettings,
14176 static void tg3_get_stats64(struct net_device *dev,
14177 struct rtnl_link_stats64 *stats)
14179 struct tg3 *tp = netdev_priv(dev);
14181 spin_lock_bh(&tp->lock);
14182 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14183 *stats = tp->net_stats_prev;
14184 spin_unlock_bh(&tp->lock);
14188 tg3_get_nstats(tp, stats);
14189 spin_unlock_bh(&tp->lock);
14192 static void tg3_set_rx_mode(struct net_device *dev)
14194 struct tg3 *tp = netdev_priv(dev);
14196 if (!netif_running(dev))
14199 tg3_full_lock(tp, 0);
14200 __tg3_set_rx_mode(dev);
14201 tg3_full_unlock(tp);
14204 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14207 dev->mtu = new_mtu;
14209 if (new_mtu > ETH_DATA_LEN) {
14210 if (tg3_flag(tp, 5780_CLASS)) {
14211 netdev_update_features(dev);
14212 tg3_flag_clear(tp, TSO_CAPABLE);
14214 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14217 if (tg3_flag(tp, 5780_CLASS)) {
14218 tg3_flag_set(tp, TSO_CAPABLE);
14219 netdev_update_features(dev);
14221 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14225 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14227 struct tg3 *tp = netdev_priv(dev);
14229 bool reset_phy = false;
14231 if (!netif_running(dev)) {
14232 /* We'll just catch it later when the
14235 tg3_set_mtu(dev, tp, new_mtu);
14241 tg3_netif_stop(tp);
14243 tg3_set_mtu(dev, tp, new_mtu);
14245 tg3_full_lock(tp, 1);
14247 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14249 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14250 * breaks all requests to 256 bytes.
14252 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14253 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14254 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14255 tg3_asic_rev(tp) == ASIC_REV_5720)
14258 err = tg3_restart_hw(tp, reset_phy);
14261 tg3_netif_start(tp);
14263 tg3_full_unlock(tp);
14271 static const struct net_device_ops tg3_netdev_ops = {
14272 .ndo_open = tg3_open,
14273 .ndo_stop = tg3_close,
14274 .ndo_start_xmit = tg3_start_xmit,
14275 .ndo_get_stats64 = tg3_get_stats64,
14276 .ndo_validate_addr = eth_validate_addr,
14277 .ndo_set_rx_mode = tg3_set_rx_mode,
14278 .ndo_set_mac_address = tg3_set_mac_addr,
14279 .ndo_do_ioctl = tg3_ioctl,
14280 .ndo_tx_timeout = tg3_tx_timeout,
14281 .ndo_change_mtu = tg3_change_mtu,
14282 .ndo_fix_features = tg3_fix_features,
14283 .ndo_set_features = tg3_set_features,
14284 #ifdef CONFIG_NET_POLL_CONTROLLER
14285 .ndo_poll_controller = tg3_poll_controller,
14289 static void tg3_get_eeprom_size(struct tg3 *tp)
14291 u32 cursize, val, magic;
14293 tp->nvram_size = EEPROM_CHIP_SIZE;
14295 if (tg3_nvram_read(tp, 0, &magic) != 0)
14298 if ((magic != TG3_EEPROM_MAGIC) &&
14299 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14300 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14304 * Size the chip by reading offsets at increasing powers of two.
14305 * When we encounter our validation signature, we know the addressing
14306 * has wrapped around, and thus have our chip size.
14310 while (cursize < tp->nvram_size) {
14311 if (tg3_nvram_read(tp, cursize, &val) != 0)
14320 tp->nvram_size = cursize;
14323 static void tg3_get_nvram_size(struct tg3 *tp)
14327 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14330 /* Selfboot format */
14331 if (val != TG3_EEPROM_MAGIC) {
14332 tg3_get_eeprom_size(tp);
14336 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14338 /* This is confusing. We want to operate on the
14339 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14340 * call will read from NVRAM and byteswap the data
14341 * according to the byteswapping settings for all
14342 * other register accesses. This ensures the data we
14343 * want will always reside in the lower 16-bits.
14344 * However, the data in NVRAM is in LE format, which
14345 * means the data from the NVRAM read will always be
14346 * opposite the endianness of the CPU. The 16-bit
14347 * byteswap then brings the data to CPU endianness.
14349 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14353 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14356 static void tg3_get_nvram_info(struct tg3 *tp)
14360 nvcfg1 = tr32(NVRAM_CFG1);
14361 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14362 tg3_flag_set(tp, FLASH);
14364 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14365 tw32(NVRAM_CFG1, nvcfg1);
14368 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14369 tg3_flag(tp, 5780_CLASS)) {
14370 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14371 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14372 tp->nvram_jedecnum = JEDEC_ATMEL;
14373 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14374 tg3_flag_set(tp, NVRAM_BUFFERED);
14376 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14377 tp->nvram_jedecnum = JEDEC_ATMEL;
14378 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14380 case FLASH_VENDOR_ATMEL_EEPROM:
14381 tp->nvram_jedecnum = JEDEC_ATMEL;
14382 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14383 tg3_flag_set(tp, NVRAM_BUFFERED);
14385 case FLASH_VENDOR_ST:
14386 tp->nvram_jedecnum = JEDEC_ST;
14387 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14388 tg3_flag_set(tp, NVRAM_BUFFERED);
14390 case FLASH_VENDOR_SAIFUN:
14391 tp->nvram_jedecnum = JEDEC_SAIFUN;
14392 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14394 case FLASH_VENDOR_SST_SMALL:
14395 case FLASH_VENDOR_SST_LARGE:
14396 tp->nvram_jedecnum = JEDEC_SST;
14397 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14401 tp->nvram_jedecnum = JEDEC_ATMEL;
14402 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14403 tg3_flag_set(tp, NVRAM_BUFFERED);
14407 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14409 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14410 case FLASH_5752PAGE_SIZE_256:
14411 tp->nvram_pagesize = 256;
14413 case FLASH_5752PAGE_SIZE_512:
14414 tp->nvram_pagesize = 512;
14416 case FLASH_5752PAGE_SIZE_1K:
14417 tp->nvram_pagesize = 1024;
14419 case FLASH_5752PAGE_SIZE_2K:
14420 tp->nvram_pagesize = 2048;
14422 case FLASH_5752PAGE_SIZE_4K:
14423 tp->nvram_pagesize = 4096;
14425 case FLASH_5752PAGE_SIZE_264:
14426 tp->nvram_pagesize = 264;
14428 case FLASH_5752PAGE_SIZE_528:
14429 tp->nvram_pagesize = 528;
14434 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14438 nvcfg1 = tr32(NVRAM_CFG1);
14440 /* NVRAM protection for TPM */
14441 if (nvcfg1 & (1 << 27))
14442 tg3_flag_set(tp, PROTECTED_NVRAM);
14444 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14445 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14446 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14447 tp->nvram_jedecnum = JEDEC_ATMEL;
14448 tg3_flag_set(tp, NVRAM_BUFFERED);
14450 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14451 tp->nvram_jedecnum = JEDEC_ATMEL;
14452 tg3_flag_set(tp, NVRAM_BUFFERED);
14453 tg3_flag_set(tp, FLASH);
14455 case FLASH_5752VENDOR_ST_M45PE10:
14456 case FLASH_5752VENDOR_ST_M45PE20:
14457 case FLASH_5752VENDOR_ST_M45PE40:
14458 tp->nvram_jedecnum = JEDEC_ST;
14459 tg3_flag_set(tp, NVRAM_BUFFERED);
14460 tg3_flag_set(tp, FLASH);
14464 if (tg3_flag(tp, FLASH)) {
14465 tg3_nvram_get_pagesize(tp, nvcfg1);
14467 /* For eeprom, set pagesize to maximum eeprom size */
14468 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14470 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14471 tw32(NVRAM_CFG1, nvcfg1);
14475 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14477 u32 nvcfg1, protect = 0;
14479 nvcfg1 = tr32(NVRAM_CFG1);
14481 /* NVRAM protection for TPM */
14482 if (nvcfg1 & (1 << 27)) {
14483 tg3_flag_set(tp, PROTECTED_NVRAM);
14487 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14489 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14490 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14491 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14492 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14493 tp->nvram_jedecnum = JEDEC_ATMEL;
14494 tg3_flag_set(tp, NVRAM_BUFFERED);
14495 tg3_flag_set(tp, FLASH);
14496 tp->nvram_pagesize = 264;
14497 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14498 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14499 tp->nvram_size = (protect ? 0x3e200 :
14500 TG3_NVRAM_SIZE_512KB);
14501 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14502 tp->nvram_size = (protect ? 0x1f200 :
14503 TG3_NVRAM_SIZE_256KB);
14505 tp->nvram_size = (protect ? 0x1f200 :
14506 TG3_NVRAM_SIZE_128KB);
14508 case FLASH_5752VENDOR_ST_M45PE10:
14509 case FLASH_5752VENDOR_ST_M45PE20:
14510 case FLASH_5752VENDOR_ST_M45PE40:
14511 tp->nvram_jedecnum = JEDEC_ST;
14512 tg3_flag_set(tp, NVRAM_BUFFERED);
14513 tg3_flag_set(tp, FLASH);
14514 tp->nvram_pagesize = 256;
14515 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14516 tp->nvram_size = (protect ?
14517 TG3_NVRAM_SIZE_64KB :
14518 TG3_NVRAM_SIZE_128KB);
14519 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14520 tp->nvram_size = (protect ?
14521 TG3_NVRAM_SIZE_64KB :
14522 TG3_NVRAM_SIZE_256KB);
14524 tp->nvram_size = (protect ?
14525 TG3_NVRAM_SIZE_128KB :
14526 TG3_NVRAM_SIZE_512KB);
14531 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14535 nvcfg1 = tr32(NVRAM_CFG1);
14537 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14538 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14539 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14540 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14541 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14542 tp->nvram_jedecnum = JEDEC_ATMEL;
14543 tg3_flag_set(tp, NVRAM_BUFFERED);
14544 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14546 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14547 tw32(NVRAM_CFG1, nvcfg1);
14549 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14550 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14551 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14552 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14553 tp->nvram_jedecnum = JEDEC_ATMEL;
14554 tg3_flag_set(tp, NVRAM_BUFFERED);
14555 tg3_flag_set(tp, FLASH);
14556 tp->nvram_pagesize = 264;
14558 case FLASH_5752VENDOR_ST_M45PE10:
14559 case FLASH_5752VENDOR_ST_M45PE20:
14560 case FLASH_5752VENDOR_ST_M45PE40:
14561 tp->nvram_jedecnum = JEDEC_ST;
14562 tg3_flag_set(tp, NVRAM_BUFFERED);
14563 tg3_flag_set(tp, FLASH);
14564 tp->nvram_pagesize = 256;
14569 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14571 u32 nvcfg1, protect = 0;
14573 nvcfg1 = tr32(NVRAM_CFG1);
14575 /* NVRAM protection for TPM */
14576 if (nvcfg1 & (1 << 27)) {
14577 tg3_flag_set(tp, PROTECTED_NVRAM);
14581 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14583 case FLASH_5761VENDOR_ATMEL_ADB021D:
14584 case FLASH_5761VENDOR_ATMEL_ADB041D:
14585 case FLASH_5761VENDOR_ATMEL_ADB081D:
14586 case FLASH_5761VENDOR_ATMEL_ADB161D:
14587 case FLASH_5761VENDOR_ATMEL_MDB021D:
14588 case FLASH_5761VENDOR_ATMEL_MDB041D:
14589 case FLASH_5761VENDOR_ATMEL_MDB081D:
14590 case FLASH_5761VENDOR_ATMEL_MDB161D:
14591 tp->nvram_jedecnum = JEDEC_ATMEL;
14592 tg3_flag_set(tp, NVRAM_BUFFERED);
14593 tg3_flag_set(tp, FLASH);
14594 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14595 tp->nvram_pagesize = 256;
14597 case FLASH_5761VENDOR_ST_A_M45PE20:
14598 case FLASH_5761VENDOR_ST_A_M45PE40:
14599 case FLASH_5761VENDOR_ST_A_M45PE80:
14600 case FLASH_5761VENDOR_ST_A_M45PE16:
14601 case FLASH_5761VENDOR_ST_M_M45PE20:
14602 case FLASH_5761VENDOR_ST_M_M45PE40:
14603 case FLASH_5761VENDOR_ST_M_M45PE80:
14604 case FLASH_5761VENDOR_ST_M_M45PE16:
14605 tp->nvram_jedecnum = JEDEC_ST;
14606 tg3_flag_set(tp, NVRAM_BUFFERED);
14607 tg3_flag_set(tp, FLASH);
14608 tp->nvram_pagesize = 256;
14613 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14616 case FLASH_5761VENDOR_ATMEL_ADB161D:
14617 case FLASH_5761VENDOR_ATMEL_MDB161D:
14618 case FLASH_5761VENDOR_ST_A_M45PE16:
14619 case FLASH_5761VENDOR_ST_M_M45PE16:
14620 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14622 case FLASH_5761VENDOR_ATMEL_ADB081D:
14623 case FLASH_5761VENDOR_ATMEL_MDB081D:
14624 case FLASH_5761VENDOR_ST_A_M45PE80:
14625 case FLASH_5761VENDOR_ST_M_M45PE80:
14626 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14628 case FLASH_5761VENDOR_ATMEL_ADB041D:
14629 case FLASH_5761VENDOR_ATMEL_MDB041D:
14630 case FLASH_5761VENDOR_ST_A_M45PE40:
14631 case FLASH_5761VENDOR_ST_M_M45PE40:
14632 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14634 case FLASH_5761VENDOR_ATMEL_ADB021D:
14635 case FLASH_5761VENDOR_ATMEL_MDB021D:
14636 case FLASH_5761VENDOR_ST_A_M45PE20:
14637 case FLASH_5761VENDOR_ST_M_M45PE20:
14638 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14644 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14646 tp->nvram_jedecnum = JEDEC_ATMEL;
14647 tg3_flag_set(tp, NVRAM_BUFFERED);
14648 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14651 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14655 nvcfg1 = tr32(NVRAM_CFG1);
14657 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14658 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14659 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14660 tp->nvram_jedecnum = JEDEC_ATMEL;
14661 tg3_flag_set(tp, NVRAM_BUFFERED);
14662 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14664 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14665 tw32(NVRAM_CFG1, nvcfg1);
14667 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14668 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14669 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14670 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14671 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14672 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14673 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14674 tp->nvram_jedecnum = JEDEC_ATMEL;
14675 tg3_flag_set(tp, NVRAM_BUFFERED);
14676 tg3_flag_set(tp, FLASH);
14678 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14679 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14680 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14681 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14682 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14684 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14685 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14686 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14688 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14689 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14690 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14694 case FLASH_5752VENDOR_ST_M45PE10:
14695 case FLASH_5752VENDOR_ST_M45PE20:
14696 case FLASH_5752VENDOR_ST_M45PE40:
14697 tp->nvram_jedecnum = JEDEC_ST;
14698 tg3_flag_set(tp, NVRAM_BUFFERED);
14699 tg3_flag_set(tp, FLASH);
14701 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14702 case FLASH_5752VENDOR_ST_M45PE10:
14703 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14705 case FLASH_5752VENDOR_ST_M45PE20:
14706 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14708 case FLASH_5752VENDOR_ST_M45PE40:
14709 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14714 tg3_flag_set(tp, NO_NVRAM);
14718 tg3_nvram_get_pagesize(tp, nvcfg1);
14719 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14720 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14724 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14728 nvcfg1 = tr32(NVRAM_CFG1);
14730 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14731 case FLASH_5717VENDOR_ATMEL_EEPROM:
14732 case FLASH_5717VENDOR_MICRO_EEPROM:
14733 tp->nvram_jedecnum = JEDEC_ATMEL;
14734 tg3_flag_set(tp, NVRAM_BUFFERED);
14735 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14737 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14738 tw32(NVRAM_CFG1, nvcfg1);
14740 case FLASH_5717VENDOR_ATMEL_MDB011D:
14741 case FLASH_5717VENDOR_ATMEL_ADB011B:
14742 case FLASH_5717VENDOR_ATMEL_ADB011D:
14743 case FLASH_5717VENDOR_ATMEL_MDB021D:
14744 case FLASH_5717VENDOR_ATMEL_ADB021B:
14745 case FLASH_5717VENDOR_ATMEL_ADB021D:
14746 case FLASH_5717VENDOR_ATMEL_45USPT:
14747 tp->nvram_jedecnum = JEDEC_ATMEL;
14748 tg3_flag_set(tp, NVRAM_BUFFERED);
14749 tg3_flag_set(tp, FLASH);
14751 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14752 case FLASH_5717VENDOR_ATMEL_MDB021D:
14753 /* Detect size with tg3_nvram_get_size() */
14755 case FLASH_5717VENDOR_ATMEL_ADB021B:
14756 case FLASH_5717VENDOR_ATMEL_ADB021D:
14757 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14760 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14764 case FLASH_5717VENDOR_ST_M_M25PE10:
14765 case FLASH_5717VENDOR_ST_A_M25PE10:
14766 case FLASH_5717VENDOR_ST_M_M45PE10:
14767 case FLASH_5717VENDOR_ST_A_M45PE10:
14768 case FLASH_5717VENDOR_ST_M_M25PE20:
14769 case FLASH_5717VENDOR_ST_A_M25PE20:
14770 case FLASH_5717VENDOR_ST_M_M45PE20:
14771 case FLASH_5717VENDOR_ST_A_M45PE20:
14772 case FLASH_5717VENDOR_ST_25USPT:
14773 case FLASH_5717VENDOR_ST_45USPT:
14774 tp->nvram_jedecnum = JEDEC_ST;
14775 tg3_flag_set(tp, NVRAM_BUFFERED);
14776 tg3_flag_set(tp, FLASH);
14778 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14779 case FLASH_5717VENDOR_ST_M_M25PE20:
14780 case FLASH_5717VENDOR_ST_M_M45PE20:
14781 /* Detect size with tg3_nvram_get_size() */
14783 case FLASH_5717VENDOR_ST_A_M25PE20:
14784 case FLASH_5717VENDOR_ST_A_M45PE20:
14785 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14788 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14793 tg3_flag_set(tp, NO_NVRAM);
14797 tg3_nvram_get_pagesize(tp, nvcfg1);
14798 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14799 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14802 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14804 u32 nvcfg1, nvmpinstrp, nv_status;
14806 nvcfg1 = tr32(NVRAM_CFG1);
14807 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14809 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14810 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14811 tg3_flag_set(tp, NO_NVRAM);
14815 switch (nvmpinstrp) {
14816 case FLASH_5762_MX25L_100:
14817 case FLASH_5762_MX25L_200:
14818 case FLASH_5762_MX25L_400:
14819 case FLASH_5762_MX25L_800:
14820 case FLASH_5762_MX25L_160_320:
14821 tp->nvram_pagesize = 4096;
14822 tp->nvram_jedecnum = JEDEC_MACRONIX;
14823 tg3_flag_set(tp, NVRAM_BUFFERED);
14824 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14825 tg3_flag_set(tp, FLASH);
14826 nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14828 (1 << (nv_status >> AUTOSENSE_DEVID &
14829 AUTOSENSE_DEVID_MASK)
14830 << AUTOSENSE_SIZE_IN_MB);
14833 case FLASH_5762_EEPROM_HD:
14834 nvmpinstrp = FLASH_5720_EEPROM_HD;
14836 case FLASH_5762_EEPROM_LD:
14837 nvmpinstrp = FLASH_5720_EEPROM_LD;
14839 case FLASH_5720VENDOR_M_ST_M45PE20:
14840 /* This pinstrap supports multiple sizes, so force it
14841 * to read the actual size from location 0xf0.
14843 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14848 switch (nvmpinstrp) {
14849 case FLASH_5720_EEPROM_HD:
14850 case FLASH_5720_EEPROM_LD:
14851 tp->nvram_jedecnum = JEDEC_ATMEL;
14852 tg3_flag_set(tp, NVRAM_BUFFERED);
14854 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14855 tw32(NVRAM_CFG1, nvcfg1);
14856 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14857 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14859 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14861 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14862 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14863 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14864 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14865 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14866 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14867 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14868 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14869 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14870 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14871 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14872 case FLASH_5720VENDOR_ATMEL_45USPT:
14873 tp->nvram_jedecnum = JEDEC_ATMEL;
14874 tg3_flag_set(tp, NVRAM_BUFFERED);
14875 tg3_flag_set(tp, FLASH);
14877 switch (nvmpinstrp) {
14878 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14879 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14880 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14881 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14883 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14884 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14885 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14886 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14888 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14889 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14890 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14893 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14894 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14898 case FLASH_5720VENDOR_M_ST_M25PE10:
14899 case FLASH_5720VENDOR_M_ST_M45PE10:
14900 case FLASH_5720VENDOR_A_ST_M25PE10:
14901 case FLASH_5720VENDOR_A_ST_M45PE10:
14902 case FLASH_5720VENDOR_M_ST_M25PE20:
14903 case FLASH_5720VENDOR_M_ST_M45PE20:
14904 case FLASH_5720VENDOR_A_ST_M25PE20:
14905 case FLASH_5720VENDOR_A_ST_M45PE20:
14906 case FLASH_5720VENDOR_M_ST_M25PE40:
14907 case FLASH_5720VENDOR_M_ST_M45PE40:
14908 case FLASH_5720VENDOR_A_ST_M25PE40:
14909 case FLASH_5720VENDOR_A_ST_M45PE40:
14910 case FLASH_5720VENDOR_M_ST_M25PE80:
14911 case FLASH_5720VENDOR_M_ST_M45PE80:
14912 case FLASH_5720VENDOR_A_ST_M25PE80:
14913 case FLASH_5720VENDOR_A_ST_M45PE80:
14914 case FLASH_5720VENDOR_ST_25USPT:
14915 case FLASH_5720VENDOR_ST_45USPT:
14916 tp->nvram_jedecnum = JEDEC_ST;
14917 tg3_flag_set(tp, NVRAM_BUFFERED);
14918 tg3_flag_set(tp, FLASH);
14920 switch (nvmpinstrp) {
14921 case FLASH_5720VENDOR_M_ST_M25PE20:
14922 case FLASH_5720VENDOR_M_ST_M45PE20:
14923 case FLASH_5720VENDOR_A_ST_M25PE20:
14924 case FLASH_5720VENDOR_A_ST_M45PE20:
14925 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14927 case FLASH_5720VENDOR_M_ST_M25PE40:
14928 case FLASH_5720VENDOR_M_ST_M45PE40:
14929 case FLASH_5720VENDOR_A_ST_M25PE40:
14930 case FLASH_5720VENDOR_A_ST_M45PE40:
14931 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14933 case FLASH_5720VENDOR_M_ST_M25PE80:
14934 case FLASH_5720VENDOR_M_ST_M45PE80:
14935 case FLASH_5720VENDOR_A_ST_M25PE80:
14936 case FLASH_5720VENDOR_A_ST_M45PE80:
14937 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14940 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14941 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14946 tg3_flag_set(tp, NO_NVRAM);
14950 tg3_nvram_get_pagesize(tp, nvcfg1);
14951 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14952 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14954 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14957 if (tg3_nvram_read(tp, 0, &val))
14960 if (val != TG3_EEPROM_MAGIC &&
14961 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14962 tg3_flag_set(tp, NO_NVRAM);
14966 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14967 static void tg3_nvram_init(struct tg3 *tp)
14969 if (tg3_flag(tp, IS_SSB_CORE)) {
14970 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14971 tg3_flag_clear(tp, NVRAM);
14972 tg3_flag_clear(tp, NVRAM_BUFFERED);
14973 tg3_flag_set(tp, NO_NVRAM);
14977 tw32_f(GRC_EEPROM_ADDR,
14978 (EEPROM_ADDR_FSM_RESET |
14979 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14980 EEPROM_ADDR_CLKPERD_SHIFT)));
14984 /* Enable seeprom accesses. */
14985 tw32_f(GRC_LOCAL_CTRL,
14986 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14989 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14990 tg3_asic_rev(tp) != ASIC_REV_5701) {
14991 tg3_flag_set(tp, NVRAM);
14993 if (tg3_nvram_lock(tp)) {
14994 netdev_warn(tp->dev,
14995 "Cannot get nvram lock, %s failed\n",
14999 tg3_enable_nvram_access(tp);
15001 tp->nvram_size = 0;
15003 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15004 tg3_get_5752_nvram_info(tp);
15005 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15006 tg3_get_5755_nvram_info(tp);
15007 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15008 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15009 tg3_asic_rev(tp) == ASIC_REV_5785)
15010 tg3_get_5787_nvram_info(tp);
15011 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15012 tg3_get_5761_nvram_info(tp);
15013 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15014 tg3_get_5906_nvram_info(tp);
15015 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15016 tg3_flag(tp, 57765_CLASS))
15017 tg3_get_57780_nvram_info(tp);
15018 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15019 tg3_asic_rev(tp) == ASIC_REV_5719)
15020 tg3_get_5717_nvram_info(tp);
15021 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15022 tg3_asic_rev(tp) == ASIC_REV_5762)
15023 tg3_get_5720_nvram_info(tp);
15025 tg3_get_nvram_info(tp);
15027 if (tp->nvram_size == 0)
15028 tg3_get_nvram_size(tp);
15030 tg3_disable_nvram_access(tp);
15031 tg3_nvram_unlock(tp);
15034 tg3_flag_clear(tp, NVRAM);
15035 tg3_flag_clear(tp, NVRAM_BUFFERED);
15037 tg3_get_eeprom_size(tp);
15041 struct subsys_tbl_ent {
15042 u16 subsys_vendor, subsys_devid;
15046 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15047 /* Broadcom boards. */
15048 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15049 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15050 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15051 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15052 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15053 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15054 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15055 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15056 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15057 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15058 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15059 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15060 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15061 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15062 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15063 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15064 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15065 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15066 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15067 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15068 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15069 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15072 { TG3PCI_SUBVENDOR_ID_3COM,
15073 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15074 { TG3PCI_SUBVENDOR_ID_3COM,
15075 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15076 { TG3PCI_SUBVENDOR_ID_3COM,
15077 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15078 { TG3PCI_SUBVENDOR_ID_3COM,
15079 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15080 { TG3PCI_SUBVENDOR_ID_3COM,
15081 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15084 { TG3PCI_SUBVENDOR_ID_DELL,
15085 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15086 { TG3PCI_SUBVENDOR_ID_DELL,
15087 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15088 { TG3PCI_SUBVENDOR_ID_DELL,
15089 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15090 { TG3PCI_SUBVENDOR_ID_DELL,
15091 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15093 /* Compaq boards. */
15094 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15095 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15096 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15097 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15098 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15099 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15100 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15101 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15102 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15103 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15106 { TG3PCI_SUBVENDOR_ID_IBM,
15107 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15110 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15114 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15115 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15116 tp->pdev->subsystem_vendor) &&
15117 (subsys_id_to_phy_id[i].subsys_devid ==
15118 tp->pdev->subsystem_device))
15119 return &subsys_id_to_phy_id[i];
15124 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15128 tp->phy_id = TG3_PHY_ID_INVALID;
15129 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15131 /* Assume an onboard device and WOL capable by default. */
15132 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15133 tg3_flag_set(tp, WOL_CAP);
15135 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15136 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15137 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15138 tg3_flag_set(tp, IS_NIC);
15140 val = tr32(VCPU_CFGSHDW);
15141 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15142 tg3_flag_set(tp, ASPM_WORKAROUND);
15143 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15144 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15145 tg3_flag_set(tp, WOL_ENABLE);
15146 device_set_wakeup_enable(&tp->pdev->dev, true);
15151 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15152 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15153 u32 nic_cfg, led_cfg;
15154 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15155 u32 nic_phy_id, ver, eeprom_phy_id;
15156 int eeprom_phy_serdes = 0;
15158 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15159 tp->nic_sram_data_cfg = nic_cfg;
15161 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15162 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15163 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15164 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15165 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15166 (ver > 0) && (ver < 0x100))
15167 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15169 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15170 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15172 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15173 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15174 tg3_asic_rev(tp) == ASIC_REV_5720)
15175 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15177 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15178 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15179 eeprom_phy_serdes = 1;
15181 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15182 if (nic_phy_id != 0) {
15183 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15184 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15186 eeprom_phy_id = (id1 >> 16) << 10;
15187 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15188 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15192 tp->phy_id = eeprom_phy_id;
15193 if (eeprom_phy_serdes) {
15194 if (!tg3_flag(tp, 5705_PLUS))
15195 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15197 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15200 if (tg3_flag(tp, 5750_PLUS))
15201 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15202 SHASTA_EXT_LED_MODE_MASK);
15204 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15208 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15209 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15212 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15213 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15216 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15217 tp->led_ctrl = LED_CTRL_MODE_MAC;
15219 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15220 * read on some older 5700/5701 bootcode.
15222 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15223 tg3_asic_rev(tp) == ASIC_REV_5701)
15224 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15228 case SHASTA_EXT_LED_SHARED:
15229 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15230 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15231 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15232 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15233 LED_CTRL_MODE_PHY_2);
15235 if (tg3_flag(tp, 5717_PLUS) ||
15236 tg3_asic_rev(tp) == ASIC_REV_5762)
15237 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15238 LED_CTRL_BLINK_RATE_MASK;
15242 case SHASTA_EXT_LED_MAC:
15243 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15246 case SHASTA_EXT_LED_COMBO:
15247 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15248 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15249 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15250 LED_CTRL_MODE_PHY_2);
15255 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15256 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15257 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15258 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15260 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15261 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15263 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15264 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15265 if ((tp->pdev->subsystem_vendor ==
15266 PCI_VENDOR_ID_ARIMA) &&
15267 (tp->pdev->subsystem_device == 0x205a ||
15268 tp->pdev->subsystem_device == 0x2063))
15269 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15271 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15272 tg3_flag_set(tp, IS_NIC);
15275 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15276 tg3_flag_set(tp, ENABLE_ASF);
15277 if (tg3_flag(tp, 5750_PLUS))
15278 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15281 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15282 tg3_flag(tp, 5750_PLUS))
15283 tg3_flag_set(tp, ENABLE_APE);
15285 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15286 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15287 tg3_flag_clear(tp, WOL_CAP);
15289 if (tg3_flag(tp, WOL_CAP) &&
15290 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15291 tg3_flag_set(tp, WOL_ENABLE);
15292 device_set_wakeup_enable(&tp->pdev->dev, true);
15295 if (cfg2 & (1 << 17))
15296 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15298 /* serdes signal pre-emphasis in register 0x590 set by */
15299 /* bootcode if bit 18 is set */
15300 if (cfg2 & (1 << 18))
15301 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15303 if ((tg3_flag(tp, 57765_PLUS) ||
15304 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15305 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15306 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15307 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15309 if (tg3_flag(tp, PCI_EXPRESS)) {
15312 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15313 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15314 !tg3_flag(tp, 57765_PLUS) &&
15315 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15316 tg3_flag_set(tp, ASPM_WORKAROUND);
15317 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15318 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15319 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15320 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15323 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15324 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15325 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15326 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15327 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15328 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15330 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15331 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15334 if (tg3_flag(tp, WOL_CAP))
15335 device_set_wakeup_enable(&tp->pdev->dev,
15336 tg3_flag(tp, WOL_ENABLE));
15338 device_set_wakeup_capable(&tp->pdev->dev, false);
15341 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15344 u32 val2, off = offset * 8;
15346 err = tg3_nvram_lock(tp);
15350 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15351 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15352 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15353 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15356 for (i = 0; i < 100; i++) {
15357 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15358 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15359 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15365 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15367 tg3_nvram_unlock(tp);
15368 if (val2 & APE_OTP_STATUS_CMD_DONE)
15374 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15379 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15380 tw32(OTP_CTRL, cmd);
15382 /* Wait for up to 1 ms for command to execute. */
15383 for (i = 0; i < 100; i++) {
15384 val = tr32(OTP_STATUS);
15385 if (val & OTP_STATUS_CMD_DONE)
15390 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15393 /* Read the gphy configuration from the OTP region of the chip. The gphy
15394 * configuration is a 32-bit value that straddles the alignment boundary.
15395 * We do two 32-bit reads and then shift and merge the results.
15397 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15399 u32 bhalf_otp, thalf_otp;
15401 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15403 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15406 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15408 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15411 thalf_otp = tr32(OTP_READ_DATA);
15413 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15415 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15418 bhalf_otp = tr32(OTP_READ_DATA);
15420 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15423 static void tg3_phy_init_link_config(struct tg3 *tp)
15425 u32 adv = ADVERTISED_Autoneg;
15427 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15428 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15429 adv |= ADVERTISED_1000baseT_Half;
15430 adv |= ADVERTISED_1000baseT_Full;
15433 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15434 adv |= ADVERTISED_100baseT_Half |
15435 ADVERTISED_100baseT_Full |
15436 ADVERTISED_10baseT_Half |
15437 ADVERTISED_10baseT_Full |
15440 adv |= ADVERTISED_FIBRE;
15442 tp->link_config.advertising = adv;
15443 tp->link_config.speed = SPEED_UNKNOWN;
15444 tp->link_config.duplex = DUPLEX_UNKNOWN;
15445 tp->link_config.autoneg = AUTONEG_ENABLE;
15446 tp->link_config.active_speed = SPEED_UNKNOWN;
15447 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15452 static int tg3_phy_probe(struct tg3 *tp)
15454 u32 hw_phy_id_1, hw_phy_id_2;
15455 u32 hw_phy_id, hw_phy_id_masked;
15458 /* flow control autonegotiation is default behavior */
15459 tg3_flag_set(tp, PAUSE_AUTONEG);
15460 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15462 if (tg3_flag(tp, ENABLE_APE)) {
15463 switch (tp->pci_fn) {
15465 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15468 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15471 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15474 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15479 if (!tg3_flag(tp, ENABLE_ASF) &&
15480 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15481 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15482 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15483 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15485 if (tg3_flag(tp, USE_PHYLIB))
15486 return tg3_phy_init(tp);
15488 /* Reading the PHY ID register can conflict with ASF
15489 * firmware access to the PHY hardware.
15492 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15493 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15495 /* Now read the physical PHY_ID from the chip and verify
15496 * that it is sane. If it doesn't look good, we fall back
15497 * to either the hard-coded table based PHY_ID and failing
15498 * that the value found in the eeprom area.
15500 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15501 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15503 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15504 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15505 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15507 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15510 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15511 tp->phy_id = hw_phy_id;
15512 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15513 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15515 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15517 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15518 /* Do nothing, phy ID already set up in
15519 * tg3_get_eeprom_hw_cfg().
15522 struct subsys_tbl_ent *p;
15524 /* No eeprom signature? Try the hardcoded
15525 * subsys device table.
15527 p = tg3_lookup_by_subsys(tp);
15529 tp->phy_id = p->phy_id;
15530 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15531 /* For now we saw the IDs 0xbc050cd0,
15532 * 0xbc050f80 and 0xbc050c30 on devices
15533 * connected to an BCM4785 and there are
15534 * probably more. Just assume that the phy is
15535 * supported when it is connected to a SSB core
15542 tp->phy_id == TG3_PHY_ID_BCM8002)
15543 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15547 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15548 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15549 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15550 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15551 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15552 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15553 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15554 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15555 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15556 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15558 tp->eee.supported = SUPPORTED_100baseT_Full |
15559 SUPPORTED_1000baseT_Full;
15560 tp->eee.advertised = ADVERTISED_100baseT_Full |
15561 ADVERTISED_1000baseT_Full;
15562 tp->eee.eee_enabled = 1;
15563 tp->eee.tx_lpi_enabled = 1;
15564 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15567 tg3_phy_init_link_config(tp);
15569 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15570 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15571 !tg3_flag(tp, ENABLE_APE) &&
15572 !tg3_flag(tp, ENABLE_ASF)) {
15575 tg3_readphy(tp, MII_BMSR, &bmsr);
15576 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15577 (bmsr & BMSR_LSTATUS))
15578 goto skip_phy_reset;
15580 err = tg3_phy_reset(tp);
15584 tg3_phy_set_wirespeed(tp);
15586 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15587 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15588 tp->link_config.flowctrl);
15590 tg3_writephy(tp, MII_BMCR,
15591 BMCR_ANENABLE | BMCR_ANRESTART);
15596 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15597 err = tg3_init_5401phy_dsp(tp);
15601 err = tg3_init_5401phy_dsp(tp);
15607 static void tg3_read_vpd(struct tg3 *tp)
15610 unsigned int block_end, rosize, len;
15614 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15618 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15620 goto out_not_found;
15622 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15623 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15624 i += PCI_VPD_LRDT_TAG_SIZE;
15626 if (block_end > vpdlen)
15627 goto out_not_found;
15629 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15630 PCI_VPD_RO_KEYWORD_MFR_ID);
15632 len = pci_vpd_info_field_size(&vpd_data[j]);
15634 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15635 if (j + len > block_end || len != 4 ||
15636 memcmp(&vpd_data[j], "1028", 4))
15639 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15640 PCI_VPD_RO_KEYWORD_VENDOR0);
15644 len = pci_vpd_info_field_size(&vpd_data[j]);
15646 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15647 if (j + len > block_end)
15650 if (len >= sizeof(tp->fw_ver))
15651 len = sizeof(tp->fw_ver) - 1;
15652 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15653 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15658 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15659 PCI_VPD_RO_KEYWORD_PARTNO);
15661 goto out_not_found;
15663 len = pci_vpd_info_field_size(&vpd_data[i]);
15665 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15666 if (len > TG3_BPN_SIZE ||
15667 (len + i) > vpdlen)
15668 goto out_not_found;
15670 memcpy(tp->board_part_number, &vpd_data[i], len);
15674 if (tp->board_part_number[0])
15678 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15679 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15680 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15681 strcpy(tp->board_part_number, "BCM5717");
15682 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15683 strcpy(tp->board_part_number, "BCM5718");
15686 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15687 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15688 strcpy(tp->board_part_number, "BCM57780");
15689 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15690 strcpy(tp->board_part_number, "BCM57760");
15691 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15692 strcpy(tp->board_part_number, "BCM57790");
15693 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15694 strcpy(tp->board_part_number, "BCM57788");
15697 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15698 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15699 strcpy(tp->board_part_number, "BCM57761");
15700 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15701 strcpy(tp->board_part_number, "BCM57765");
15702 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15703 strcpy(tp->board_part_number, "BCM57781");
15704 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15705 strcpy(tp->board_part_number, "BCM57785");
15706 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15707 strcpy(tp->board_part_number, "BCM57791");
15708 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15709 strcpy(tp->board_part_number, "BCM57795");
15712 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15713 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15714 strcpy(tp->board_part_number, "BCM57762");
15715 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15716 strcpy(tp->board_part_number, "BCM57766");
15717 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15718 strcpy(tp->board_part_number, "BCM57782");
15719 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15720 strcpy(tp->board_part_number, "BCM57786");
15723 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15724 strcpy(tp->board_part_number, "BCM95906");
15727 strcpy(tp->board_part_number, "none");
15731 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15735 if (tg3_nvram_read(tp, offset, &val) ||
15736 (val & 0xfc000000) != 0x0c000000 ||
15737 tg3_nvram_read(tp, offset + 4, &val) ||
15744 static void tg3_read_bc_ver(struct tg3 *tp)
15746 u32 val, offset, start, ver_offset;
15748 bool newver = false;
15750 if (tg3_nvram_read(tp, 0xc, &offset) ||
15751 tg3_nvram_read(tp, 0x4, &start))
15754 offset = tg3_nvram_logical_addr(tp, offset);
15756 if (tg3_nvram_read(tp, offset, &val))
15759 if ((val & 0xfc000000) == 0x0c000000) {
15760 if (tg3_nvram_read(tp, offset + 4, &val))
15767 dst_off = strlen(tp->fw_ver);
15770 if (TG3_VER_SIZE - dst_off < 16 ||
15771 tg3_nvram_read(tp, offset + 8, &ver_offset))
15774 offset = offset + ver_offset - start;
15775 for (i = 0; i < 16; i += 4) {
15777 if (tg3_nvram_read_be32(tp, offset + i, &v))
15780 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15785 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15788 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15789 TG3_NVM_BCVER_MAJSFT;
15790 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15791 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15792 "v%d.%02d", major, minor);
15796 static void tg3_read_hwsb_ver(struct tg3 *tp)
15798 u32 val, major, minor;
15800 /* Use native endian representation */
15801 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15804 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15805 TG3_NVM_HWSB_CFG1_MAJSFT;
15806 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15807 TG3_NVM_HWSB_CFG1_MINSFT;
15809 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15812 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15814 u32 offset, major, minor, build;
15816 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15818 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15821 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15822 case TG3_EEPROM_SB_REVISION_0:
15823 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15825 case TG3_EEPROM_SB_REVISION_2:
15826 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15828 case TG3_EEPROM_SB_REVISION_3:
15829 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15831 case TG3_EEPROM_SB_REVISION_4:
15832 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15834 case TG3_EEPROM_SB_REVISION_5:
15835 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15837 case TG3_EEPROM_SB_REVISION_6:
15838 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15844 if (tg3_nvram_read(tp, offset, &val))
15847 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15848 TG3_EEPROM_SB_EDH_BLD_SHFT;
15849 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15850 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15851 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15853 if (minor > 99 || build > 26)
15856 offset = strlen(tp->fw_ver);
15857 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15858 " v%d.%02d", major, minor);
15861 offset = strlen(tp->fw_ver);
15862 if (offset < TG3_VER_SIZE - 1)
15863 tp->fw_ver[offset] = 'a' + build - 1;
15867 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15869 u32 val, offset, start;
15872 for (offset = TG3_NVM_DIR_START;
15873 offset < TG3_NVM_DIR_END;
15874 offset += TG3_NVM_DIRENT_SIZE) {
15875 if (tg3_nvram_read(tp, offset, &val))
15878 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15882 if (offset == TG3_NVM_DIR_END)
15885 if (!tg3_flag(tp, 5705_PLUS))
15886 start = 0x08000000;
15887 else if (tg3_nvram_read(tp, offset - 4, &start))
15890 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15891 !tg3_fw_img_is_valid(tp, offset) ||
15892 tg3_nvram_read(tp, offset + 8, &val))
15895 offset += val - start;
15897 vlen = strlen(tp->fw_ver);
15899 tp->fw_ver[vlen++] = ',';
15900 tp->fw_ver[vlen++] = ' ';
15902 for (i = 0; i < 4; i++) {
15904 if (tg3_nvram_read_be32(tp, offset, &v))
15907 offset += sizeof(v);
15909 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15910 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15914 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15919 static void tg3_probe_ncsi(struct tg3 *tp)
15923 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15924 if (apedata != APE_SEG_SIG_MAGIC)
15927 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15928 if (!(apedata & APE_FW_STATUS_READY))
15931 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15932 tg3_flag_set(tp, APE_HAS_NCSI);
15935 static void tg3_read_dash_ver(struct tg3 *tp)
15941 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15943 if (tg3_flag(tp, APE_HAS_NCSI))
15945 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15950 vlen = strlen(tp->fw_ver);
15952 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15954 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15955 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15956 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15957 (apedata & APE_FW_VERSION_BLDMSK));
15960 static void tg3_read_otp_ver(struct tg3 *tp)
15964 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15967 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15968 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15969 TG3_OTP_MAGIC0_VALID(val)) {
15970 u64 val64 = (u64) val << 32 | val2;
15974 for (i = 0; i < 7; i++) {
15975 if ((val64 & 0xff) == 0)
15977 ver = val64 & 0xff;
15980 vlen = strlen(tp->fw_ver);
15981 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15985 static void tg3_read_fw_ver(struct tg3 *tp)
15988 bool vpd_vers = false;
15990 if (tp->fw_ver[0] != 0)
15993 if (tg3_flag(tp, NO_NVRAM)) {
15994 strcat(tp->fw_ver, "sb");
15995 tg3_read_otp_ver(tp);
15999 if (tg3_nvram_read(tp, 0, &val))
16002 if (val == TG3_EEPROM_MAGIC)
16003 tg3_read_bc_ver(tp);
16004 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16005 tg3_read_sb_ver(tp, val);
16006 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16007 tg3_read_hwsb_ver(tp);
16009 if (tg3_flag(tp, ENABLE_ASF)) {
16010 if (tg3_flag(tp, ENABLE_APE)) {
16011 tg3_probe_ncsi(tp);
16013 tg3_read_dash_ver(tp);
16014 } else if (!vpd_vers) {
16015 tg3_read_mgmtfw_ver(tp);
16019 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16022 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16024 if (tg3_flag(tp, LRG_PROD_RING_CAP))
16025 return TG3_RX_RET_MAX_SIZE_5717;
16026 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16027 return TG3_RX_RET_MAX_SIZE_5700;
16029 return TG3_RX_RET_MAX_SIZE_5705;
16032 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16033 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16034 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16035 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16039 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16041 struct pci_dev *peer;
16042 unsigned int func, devnr = tp->pdev->devfn & ~7;
16044 for (func = 0; func < 8; func++) {
16045 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16046 if (peer && peer != tp->pdev)
16050 /* 5704 can be configured in single-port mode, set peer to
16051 * tp->pdev in that case.
16059 * We don't need to keep the refcount elevated; there's no way
16060 * to remove one half of this device without removing the other
16067 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16069 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16070 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16073 /* All devices that use the alternate
16074 * ASIC REV location have a CPMU.
16076 tg3_flag_set(tp, CPMU_PRESENT);
16078 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16079 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16080 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16081 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16082 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16083 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16084 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16085 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16086 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16087 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16088 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16089 reg = TG3PCI_GEN2_PRODID_ASICREV;
16090 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16091 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16092 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16093 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16094 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16095 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16096 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16097 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16098 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16099 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16100 reg = TG3PCI_GEN15_PRODID_ASICREV;
16102 reg = TG3PCI_PRODID_ASICREV;
16104 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16107 /* Wrong chip ID in 5752 A0. This code can be removed later
16108 * as A0 is not in production.
16110 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16111 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16113 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16114 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16116 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16117 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16118 tg3_asic_rev(tp) == ASIC_REV_5720)
16119 tg3_flag_set(tp, 5717_PLUS);
16121 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16122 tg3_asic_rev(tp) == ASIC_REV_57766)
16123 tg3_flag_set(tp, 57765_CLASS);
16125 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16126 tg3_asic_rev(tp) == ASIC_REV_5762)
16127 tg3_flag_set(tp, 57765_PLUS);
16129 /* Intentionally exclude ASIC_REV_5906 */
16130 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16131 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16132 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16133 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16134 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16135 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16136 tg3_flag(tp, 57765_PLUS))
16137 tg3_flag_set(tp, 5755_PLUS);
16139 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16140 tg3_asic_rev(tp) == ASIC_REV_5714)
16141 tg3_flag_set(tp, 5780_CLASS);
16143 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16144 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16145 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16146 tg3_flag(tp, 5755_PLUS) ||
16147 tg3_flag(tp, 5780_CLASS))
16148 tg3_flag_set(tp, 5750_PLUS);
16150 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16151 tg3_flag(tp, 5750_PLUS))
16152 tg3_flag_set(tp, 5705_PLUS);
16155 static bool tg3_10_100_only_device(struct tg3 *tp,
16156 const struct pci_device_id *ent)
16158 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16160 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16161 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16162 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16165 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16166 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16167 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16177 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16180 u32 pci_state_reg, grc_misc_cfg;
16185 /* Force memory write invalidate off. If we leave it on,
16186 * then on 5700_BX chips we have to enable a workaround.
16187 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16188 * to match the cacheline size. The Broadcom driver have this
16189 * workaround but turns MWI off all the times so never uses
16190 * it. This seems to suggest that the workaround is insufficient.
16192 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16193 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16194 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16196 /* Important! -- Make sure register accesses are byteswapped
16197 * correctly. Also, for those chips that require it, make
16198 * sure that indirect register accesses are enabled before
16199 * the first operation.
16201 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16203 tp->misc_host_ctrl |= (misc_ctrl_reg &
16204 MISC_HOST_CTRL_CHIPREV);
16205 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16206 tp->misc_host_ctrl);
16208 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16210 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16211 * we need to disable memory and use config. cycles
16212 * only to access all registers. The 5702/03 chips
16213 * can mistakenly decode the special cycles from the
16214 * ICH chipsets as memory write cycles, causing corruption
16215 * of register and memory space. Only certain ICH bridges
16216 * will drive special cycles with non-zero data during the
16217 * address phase which can fall within the 5703's address
16218 * range. This is not an ICH bug as the PCI spec allows
16219 * non-zero address during special cycles. However, only
16220 * these ICH bridges are known to drive non-zero addresses
16221 * during special cycles.
16223 * Since special cycles do not cross PCI bridges, we only
16224 * enable this workaround if the 5703 is on the secondary
16225 * bus of these ICH bridges.
16227 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16228 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16229 static struct tg3_dev_id {
16233 } ich_chipsets[] = {
16234 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16236 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16238 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16240 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16244 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16245 struct pci_dev *bridge = NULL;
16247 while (pci_id->vendor != 0) {
16248 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16254 if (pci_id->rev != PCI_ANY_ID) {
16255 if (bridge->revision > pci_id->rev)
16258 if (bridge->subordinate &&
16259 (bridge->subordinate->number ==
16260 tp->pdev->bus->number)) {
16261 tg3_flag_set(tp, ICH_WORKAROUND);
16262 pci_dev_put(bridge);
16268 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16269 static struct tg3_dev_id {
16272 } bridge_chipsets[] = {
16273 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16274 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16277 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16278 struct pci_dev *bridge = NULL;
16280 while (pci_id->vendor != 0) {
16281 bridge = pci_get_device(pci_id->vendor,
16288 if (bridge->subordinate &&
16289 (bridge->subordinate->number <=
16290 tp->pdev->bus->number) &&
16291 (bridge->subordinate->busn_res.end >=
16292 tp->pdev->bus->number)) {
16293 tg3_flag_set(tp, 5701_DMA_BUG);
16294 pci_dev_put(bridge);
16300 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16301 * DMA addresses > 40-bit. This bridge may have other additional
16302 * 57xx devices behind it in some 4-port NIC designs for example.
16303 * Any tg3 device found behind the bridge will also need the 40-bit
16306 if (tg3_flag(tp, 5780_CLASS)) {
16307 tg3_flag_set(tp, 40BIT_DMA_BUG);
16308 tp->msi_cap = tp->pdev->msi_cap;
16310 struct pci_dev *bridge = NULL;
16313 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16314 PCI_DEVICE_ID_SERVERWORKS_EPB,
16316 if (bridge && bridge->subordinate &&
16317 (bridge->subordinate->number <=
16318 tp->pdev->bus->number) &&
16319 (bridge->subordinate->busn_res.end >=
16320 tp->pdev->bus->number)) {
16321 tg3_flag_set(tp, 40BIT_DMA_BUG);
16322 pci_dev_put(bridge);
16328 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16329 tg3_asic_rev(tp) == ASIC_REV_5714)
16330 tp->pdev_peer = tg3_find_peer(tp);
16332 /* Determine TSO capabilities */
16333 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16334 ; /* Do nothing. HW bug. */
16335 else if (tg3_flag(tp, 57765_PLUS))
16336 tg3_flag_set(tp, HW_TSO_3);
16337 else if (tg3_flag(tp, 5755_PLUS) ||
16338 tg3_asic_rev(tp) == ASIC_REV_5906)
16339 tg3_flag_set(tp, HW_TSO_2);
16340 else if (tg3_flag(tp, 5750_PLUS)) {
16341 tg3_flag_set(tp, HW_TSO_1);
16342 tg3_flag_set(tp, TSO_BUG);
16343 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16344 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16345 tg3_flag_clear(tp, TSO_BUG);
16346 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16347 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16348 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16349 tg3_flag_set(tp, FW_TSO);
16350 tg3_flag_set(tp, TSO_BUG);
16351 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16352 tp->fw_needed = FIRMWARE_TG3TSO5;
16354 tp->fw_needed = FIRMWARE_TG3TSO;
16357 /* Selectively allow TSO based on operating conditions */
16358 if (tg3_flag(tp, HW_TSO_1) ||
16359 tg3_flag(tp, HW_TSO_2) ||
16360 tg3_flag(tp, HW_TSO_3) ||
16361 tg3_flag(tp, FW_TSO)) {
16362 /* For firmware TSO, assume ASF is disabled.
16363 * We'll disable TSO later if we discover ASF
16364 * is enabled in tg3_get_eeprom_hw_cfg().
16366 tg3_flag_set(tp, TSO_CAPABLE);
16368 tg3_flag_clear(tp, TSO_CAPABLE);
16369 tg3_flag_clear(tp, TSO_BUG);
16370 tp->fw_needed = NULL;
16373 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16374 tp->fw_needed = FIRMWARE_TG3;
16376 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16377 tp->fw_needed = FIRMWARE_TG357766;
16381 if (tg3_flag(tp, 5750_PLUS)) {
16382 tg3_flag_set(tp, SUPPORT_MSI);
16383 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16384 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16385 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16386 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16387 tp->pdev_peer == tp->pdev))
16388 tg3_flag_clear(tp, SUPPORT_MSI);
16390 if (tg3_flag(tp, 5755_PLUS) ||
16391 tg3_asic_rev(tp) == ASIC_REV_5906) {
16392 tg3_flag_set(tp, 1SHOT_MSI);
16395 if (tg3_flag(tp, 57765_PLUS)) {
16396 tg3_flag_set(tp, SUPPORT_MSIX);
16397 tp->irq_max = TG3_IRQ_MAX_VECS;
16403 if (tp->irq_max > 1) {
16404 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16405 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16407 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16408 tg3_asic_rev(tp) == ASIC_REV_5720)
16409 tp->txq_max = tp->irq_max - 1;
16412 if (tg3_flag(tp, 5755_PLUS) ||
16413 tg3_asic_rev(tp) == ASIC_REV_5906)
16414 tg3_flag_set(tp, SHORT_DMA_BUG);
16416 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16417 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16419 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16420 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16421 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16422 tg3_asic_rev(tp) == ASIC_REV_5762)
16423 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16425 if (tg3_flag(tp, 57765_PLUS) &&
16426 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16427 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16429 if (!tg3_flag(tp, 5705_PLUS) ||
16430 tg3_flag(tp, 5780_CLASS) ||
16431 tg3_flag(tp, USE_JUMBO_BDFLAG))
16432 tg3_flag_set(tp, JUMBO_CAPABLE);
16434 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16437 if (pci_is_pcie(tp->pdev)) {
16440 tg3_flag_set(tp, PCI_EXPRESS);
16442 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16443 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16444 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16445 tg3_flag_clear(tp, HW_TSO_2);
16446 tg3_flag_clear(tp, TSO_CAPABLE);
16448 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16449 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16450 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16451 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16452 tg3_flag_set(tp, CLKREQ_BUG);
16453 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16454 tg3_flag_set(tp, L1PLLPD_EN);
16456 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16457 /* BCM5785 devices are effectively PCIe devices, and should
16458 * follow PCIe codepaths, but do not have a PCIe capabilities
16461 tg3_flag_set(tp, PCI_EXPRESS);
16462 } else if (!tg3_flag(tp, 5705_PLUS) ||
16463 tg3_flag(tp, 5780_CLASS)) {
16464 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16465 if (!tp->pcix_cap) {
16466 dev_err(&tp->pdev->dev,
16467 "Cannot find PCI-X capability, aborting\n");
16471 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16472 tg3_flag_set(tp, PCIX_MODE);
16475 /* If we have an AMD 762 or VIA K8T800 chipset, write
16476 * reordering to the mailbox registers done by the host
16477 * controller can cause major troubles. We read back from
16478 * every mailbox register write to force the writes to be
16479 * posted to the chip in order.
16481 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16482 !tg3_flag(tp, PCI_EXPRESS))
16483 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16485 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16486 &tp->pci_cacheline_sz);
16487 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16488 &tp->pci_lat_timer);
16489 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16490 tp->pci_lat_timer < 64) {
16491 tp->pci_lat_timer = 64;
16492 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16493 tp->pci_lat_timer);
16496 /* Important! -- It is critical that the PCI-X hw workaround
16497 * situation is decided before the first MMIO register access.
16499 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16500 /* 5700 BX chips need to have their TX producer index
16501 * mailboxes written twice to workaround a bug.
16503 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16505 /* If we are in PCI-X mode, enable register write workaround.
16507 * The workaround is to use indirect register accesses
16508 * for all chip writes not to mailbox registers.
16510 if (tg3_flag(tp, PCIX_MODE)) {
16513 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16515 /* The chip can have it's power management PCI config
16516 * space registers clobbered due to this bug.
16517 * So explicitly force the chip into D0 here.
16519 pci_read_config_dword(tp->pdev,
16520 tp->pdev->pm_cap + PCI_PM_CTRL,
16522 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16523 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16524 pci_write_config_dword(tp->pdev,
16525 tp->pdev->pm_cap + PCI_PM_CTRL,
16528 /* Also, force SERR#/PERR# in PCI command. */
16529 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16530 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16531 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16535 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16536 tg3_flag_set(tp, PCI_HIGH_SPEED);
16537 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16538 tg3_flag_set(tp, PCI_32BIT);
16540 /* Chip-specific fixup from Broadcom driver */
16541 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16542 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16543 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16544 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16547 /* Default fast path register access methods */
16548 tp->read32 = tg3_read32;
16549 tp->write32 = tg3_write32;
16550 tp->read32_mbox = tg3_read32;
16551 tp->write32_mbox = tg3_write32;
16552 tp->write32_tx_mbox = tg3_write32;
16553 tp->write32_rx_mbox = tg3_write32;
16555 /* Various workaround register access methods */
16556 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16557 tp->write32 = tg3_write_indirect_reg32;
16558 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16559 (tg3_flag(tp, PCI_EXPRESS) &&
16560 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16562 * Back to back register writes can cause problems on these
16563 * chips, the workaround is to read back all reg writes
16564 * except those to mailbox regs.
16566 * See tg3_write_indirect_reg32().
16568 tp->write32 = tg3_write_flush_reg32;
16571 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16572 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16573 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16574 tp->write32_rx_mbox = tg3_write_flush_reg32;
16577 if (tg3_flag(tp, ICH_WORKAROUND)) {
16578 tp->read32 = tg3_read_indirect_reg32;
16579 tp->write32 = tg3_write_indirect_reg32;
16580 tp->read32_mbox = tg3_read_indirect_mbox;
16581 tp->write32_mbox = tg3_write_indirect_mbox;
16582 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16583 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16588 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16589 pci_cmd &= ~PCI_COMMAND_MEMORY;
16590 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16592 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16593 tp->read32_mbox = tg3_read32_mbox_5906;
16594 tp->write32_mbox = tg3_write32_mbox_5906;
16595 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16596 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16599 if (tp->write32 == tg3_write_indirect_reg32 ||
16600 (tg3_flag(tp, PCIX_MODE) &&
16601 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16602 tg3_asic_rev(tp) == ASIC_REV_5701)))
16603 tg3_flag_set(tp, SRAM_USE_CONFIG);
16605 /* The memory arbiter has to be enabled in order for SRAM accesses
16606 * to succeed. Normally on powerup the tg3 chip firmware will make
16607 * sure it is enabled, but other entities such as system netboot
16608 * code might disable it.
16610 val = tr32(MEMARB_MODE);
16611 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16613 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16614 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16615 tg3_flag(tp, 5780_CLASS)) {
16616 if (tg3_flag(tp, PCIX_MODE)) {
16617 pci_read_config_dword(tp->pdev,
16618 tp->pcix_cap + PCI_X_STATUS,
16620 tp->pci_fn = val & 0x7;
16622 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16623 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16624 tg3_asic_rev(tp) == ASIC_REV_5720) {
16625 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16626 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16627 val = tr32(TG3_CPMU_STATUS);
16629 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16630 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16632 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16633 TG3_CPMU_STATUS_FSHFT_5719;
16636 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16637 tp->write32_tx_mbox = tg3_write_flush_reg32;
16638 tp->write32_rx_mbox = tg3_write_flush_reg32;
16641 /* Get eeprom hw config before calling tg3_set_power_state().
16642 * In particular, the TG3_FLAG_IS_NIC flag must be
16643 * determined before calling tg3_set_power_state() so that
16644 * we know whether or not to switch out of Vaux power.
16645 * When the flag is set, it means that GPIO1 is used for eeprom
16646 * write protect and also implies that it is a LOM where GPIOs
16647 * are not used to switch power.
16649 tg3_get_eeprom_hw_cfg(tp);
16651 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16652 tg3_flag_clear(tp, TSO_CAPABLE);
16653 tg3_flag_clear(tp, TSO_BUG);
16654 tp->fw_needed = NULL;
16657 if (tg3_flag(tp, ENABLE_APE)) {
16658 /* Allow reads and writes to the
16659 * APE register and memory space.
16661 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16662 PCISTATE_ALLOW_APE_SHMEM_WR |
16663 PCISTATE_ALLOW_APE_PSPACE_WR;
16664 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16667 tg3_ape_lock_init(tp);
16668 tp->ape_hb_interval =
16669 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16672 /* Set up tp->grc_local_ctrl before calling
16673 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16674 * will bring 5700's external PHY out of reset.
16675 * It is also used as eeprom write protect on LOMs.
16677 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16678 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16679 tg3_flag(tp, EEPROM_WRITE_PROT))
16680 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16681 GRC_LCLCTRL_GPIO_OUTPUT1);
16682 /* Unused GPIO3 must be driven as output on 5752 because there
16683 * are no pull-up resistors on unused GPIO pins.
16685 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16686 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16688 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16689 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16690 tg3_flag(tp, 57765_CLASS))
16691 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16693 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16694 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16695 /* Turn off the debug UART. */
16696 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16697 if (tg3_flag(tp, IS_NIC))
16698 /* Keep VMain power. */
16699 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16700 GRC_LCLCTRL_GPIO_OUTPUT0;
16703 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16704 tp->grc_local_ctrl |=
16705 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16707 /* Switch out of Vaux if it is a NIC */
16708 tg3_pwrsrc_switch_to_vmain(tp);
16710 /* Derive initial jumbo mode from MTU assigned in
16711 * ether_setup() via the alloc_etherdev() call
16713 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16714 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16716 /* Determine WakeOnLan speed to use. */
16717 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16718 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16719 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16720 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16721 tg3_flag_clear(tp, WOL_SPEED_100MB);
16723 tg3_flag_set(tp, WOL_SPEED_100MB);
16726 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16727 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16729 /* A few boards don't want Ethernet@WireSpeed phy feature */
16730 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16731 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16732 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16733 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16734 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16735 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16736 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16738 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16739 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16740 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16741 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16742 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16744 if (tg3_flag(tp, 5705_PLUS) &&
16745 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16746 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16747 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16748 !tg3_flag(tp, 57765_PLUS)) {
16749 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16750 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16751 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16752 tg3_asic_rev(tp) == ASIC_REV_5761) {
16753 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16754 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16755 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16756 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16757 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16759 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16762 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16763 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16764 tp->phy_otp = tg3_read_otp_phycfg(tp);
16765 if (tp->phy_otp == 0)
16766 tp->phy_otp = TG3_OTP_DEFAULT;
16769 if (tg3_flag(tp, CPMU_PRESENT))
16770 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16772 tp->mi_mode = MAC_MI_MODE_BASE;
16774 tp->coalesce_mode = 0;
16775 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16776 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16777 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16779 /* Set these bits to enable statistics workaround. */
16780 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16781 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16782 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16783 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16784 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16785 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16788 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16789 tg3_asic_rev(tp) == ASIC_REV_57780)
16790 tg3_flag_set(tp, USE_PHYLIB);
16792 err = tg3_mdio_init(tp);
16796 /* Initialize data/descriptor byte/word swapping. */
16797 val = tr32(GRC_MODE);
16798 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16799 tg3_asic_rev(tp) == ASIC_REV_5762)
16800 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16801 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16802 GRC_MODE_B2HRX_ENABLE |
16803 GRC_MODE_HTX2B_ENABLE |
16804 GRC_MODE_HOST_STACKUP);
16806 val &= GRC_MODE_HOST_STACKUP;
16808 tw32(GRC_MODE, val | tp->grc_mode);
16810 tg3_switch_clocks(tp);
16812 /* Clear this out for sanity. */
16813 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16815 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16816 tw32(TG3PCI_REG_BASE_ADDR, 0);
16818 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16820 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16821 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16822 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16823 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16824 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16825 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16826 void __iomem *sram_base;
16828 /* Write some dummy words into the SRAM status block
16829 * area, see if it reads back correctly. If the return
16830 * value is bad, force enable the PCIX workaround.
16832 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16834 writel(0x00000000, sram_base);
16835 writel(0x00000000, sram_base + 4);
16836 writel(0xffffffff, sram_base + 4);
16837 if (readl(sram_base) != 0x00000000)
16838 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16843 tg3_nvram_init(tp);
16845 /* If the device has an NVRAM, no need to load patch firmware */
16846 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16847 !tg3_flag(tp, NO_NVRAM))
16848 tp->fw_needed = NULL;
16850 grc_misc_cfg = tr32(GRC_MISC_CFG);
16851 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16853 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16854 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16855 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16856 tg3_flag_set(tp, IS_5788);
16858 if (!tg3_flag(tp, IS_5788) &&
16859 tg3_asic_rev(tp) != ASIC_REV_5700)
16860 tg3_flag_set(tp, TAGGED_STATUS);
16861 if (tg3_flag(tp, TAGGED_STATUS)) {
16862 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16863 HOSTCC_MODE_CLRTICK_TXBD);
16865 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16866 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16867 tp->misc_host_ctrl);
16870 /* Preserve the APE MAC_MODE bits */
16871 if (tg3_flag(tp, ENABLE_APE))
16872 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16876 if (tg3_10_100_only_device(tp, ent))
16877 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16879 err = tg3_phy_probe(tp);
16881 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16882 /* ... but do not return immediately ... */
16887 tg3_read_fw_ver(tp);
16889 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16890 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16892 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16893 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16895 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16898 /* 5700 {AX,BX} chips have a broken status block link
16899 * change bit implementation, so we must use the
16900 * status register in those cases.
16902 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16903 tg3_flag_set(tp, USE_LINKCHG_REG);
16905 tg3_flag_clear(tp, USE_LINKCHG_REG);
16907 /* The led_ctrl is set during tg3_phy_probe, here we might
16908 * have to force the link status polling mechanism based
16909 * upon subsystem IDs.
16911 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16912 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16913 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16914 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16915 tg3_flag_set(tp, USE_LINKCHG_REG);
16918 /* For all SERDES we poll the MAC status register. */
16919 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16920 tg3_flag_set(tp, POLL_SERDES);
16922 tg3_flag_clear(tp, POLL_SERDES);
16924 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16925 tg3_flag_set(tp, POLL_CPMU_LINK);
16927 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16928 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16929 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16930 tg3_flag(tp, PCIX_MODE)) {
16931 tp->rx_offset = NET_SKB_PAD;
16932 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16933 tp->rx_copy_thresh = ~(u16)0;
16937 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16938 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16939 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16941 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16943 /* Increment the rx prod index on the rx std ring by at most
16944 * 8 for these chips to workaround hw errata.
16946 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16947 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16948 tg3_asic_rev(tp) == ASIC_REV_5755)
16949 tp->rx_std_max_post = 8;
16951 if (tg3_flag(tp, ASPM_WORKAROUND))
16952 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16953 PCIE_PWR_MGMT_L1_THRESH_MSK;
16958 #ifdef CONFIG_SPARC
16959 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16961 struct net_device *dev = tp->dev;
16962 struct pci_dev *pdev = tp->pdev;
16963 struct device_node *dp = pci_device_to_OF_node(pdev);
16964 const unsigned char *addr;
16967 addr = of_get_property(dp, "local-mac-address", &len);
16968 if (addr && len == ETH_ALEN) {
16969 memcpy(dev->dev_addr, addr, ETH_ALEN);
16975 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16977 struct net_device *dev = tp->dev;
16979 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16984 static int tg3_get_device_address(struct tg3 *tp)
16986 struct net_device *dev = tp->dev;
16987 u32 hi, lo, mac_offset;
16991 #ifdef CONFIG_SPARC
16992 if (!tg3_get_macaddr_sparc(tp))
16996 if (tg3_flag(tp, IS_SSB_CORE)) {
16997 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16998 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
17003 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17004 tg3_flag(tp, 5780_CLASS)) {
17005 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17007 if (tg3_nvram_lock(tp))
17008 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17010 tg3_nvram_unlock(tp);
17011 } else if (tg3_flag(tp, 5717_PLUS)) {
17012 if (tp->pci_fn & 1)
17014 if (tp->pci_fn > 1)
17015 mac_offset += 0x18c;
17016 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17019 /* First try to get it from MAC address mailbox. */
17020 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
17021 if ((hi >> 16) == 0x484b) {
17022 dev->dev_addr[0] = (hi >> 8) & 0xff;
17023 dev->dev_addr[1] = (hi >> 0) & 0xff;
17025 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17026 dev->dev_addr[2] = (lo >> 24) & 0xff;
17027 dev->dev_addr[3] = (lo >> 16) & 0xff;
17028 dev->dev_addr[4] = (lo >> 8) & 0xff;
17029 dev->dev_addr[5] = (lo >> 0) & 0xff;
17031 /* Some old bootcode may report a 0 MAC address in SRAM */
17032 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17035 /* Next, try NVRAM. */
17036 if (!tg3_flag(tp, NO_NVRAM) &&
17037 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17038 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17039 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17040 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17042 /* Finally just fetch it out of the MAC control regs. */
17044 hi = tr32(MAC_ADDR_0_HIGH);
17045 lo = tr32(MAC_ADDR_0_LOW);
17047 dev->dev_addr[5] = lo & 0xff;
17048 dev->dev_addr[4] = (lo >> 8) & 0xff;
17049 dev->dev_addr[3] = (lo >> 16) & 0xff;
17050 dev->dev_addr[2] = (lo >> 24) & 0xff;
17051 dev->dev_addr[1] = hi & 0xff;
17052 dev->dev_addr[0] = (hi >> 8) & 0xff;
17056 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17057 #ifdef CONFIG_SPARC
17058 if (!tg3_get_default_macaddr_sparc(tp))
17066 #define BOUNDARY_SINGLE_CACHELINE 1
17067 #define BOUNDARY_MULTI_CACHELINE 2
17069 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17071 int cacheline_size;
17075 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17077 cacheline_size = 1024;
17079 cacheline_size = (int) byte * 4;
17081 /* On 5703 and later chips, the boundary bits have no
17084 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17085 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17086 !tg3_flag(tp, PCI_EXPRESS))
17089 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17090 goal = BOUNDARY_MULTI_CACHELINE;
17092 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17093 goal = BOUNDARY_SINGLE_CACHELINE;
17099 if (tg3_flag(tp, 57765_PLUS)) {
17100 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17107 /* PCI controllers on most RISC systems tend to disconnect
17108 * when a device tries to burst across a cache-line boundary.
17109 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17111 * Unfortunately, for PCI-E there are only limited
17112 * write-side controls for this, and thus for reads
17113 * we will still get the disconnects. We'll also waste
17114 * these PCI cycles for both read and write for chips
17115 * other than 5700 and 5701 which do not implement the
17118 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17119 switch (cacheline_size) {
17124 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17125 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17126 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17128 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17129 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17134 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17135 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17139 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17140 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17143 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17144 switch (cacheline_size) {
17148 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17149 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17150 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17156 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17157 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17161 switch (cacheline_size) {
17163 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17164 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17165 DMA_RWCTRL_WRITE_BNDRY_16);
17170 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17171 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17172 DMA_RWCTRL_WRITE_BNDRY_32);
17177 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17178 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17179 DMA_RWCTRL_WRITE_BNDRY_64);
17184 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17185 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17186 DMA_RWCTRL_WRITE_BNDRY_128);
17191 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17192 DMA_RWCTRL_WRITE_BNDRY_256);
17195 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17196 DMA_RWCTRL_WRITE_BNDRY_512);
17200 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17201 DMA_RWCTRL_WRITE_BNDRY_1024);
17210 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17211 int size, bool to_device)
17213 struct tg3_internal_buffer_desc test_desc;
17214 u32 sram_dma_descs;
17217 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17219 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17220 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17221 tw32(RDMAC_STATUS, 0);
17222 tw32(WDMAC_STATUS, 0);
17224 tw32(BUFMGR_MODE, 0);
17225 tw32(FTQ_RESET, 0);
17227 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17228 test_desc.addr_lo = buf_dma & 0xffffffff;
17229 test_desc.nic_mbuf = 0x00002100;
17230 test_desc.len = size;
17233 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
17234 * the *second* time the tg3 driver was getting loaded after an
17237 * Broadcom tells me:
17238 * ...the DMA engine is connected to the GRC block and a DMA
17239 * reset may affect the GRC block in some unpredictable way...
17240 * The behavior of resets to individual blocks has not been tested.
17242 * Broadcom noted the GRC reset will also reset all sub-components.
17245 test_desc.cqid_sqid = (13 << 8) | 2;
17247 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17250 test_desc.cqid_sqid = (16 << 8) | 7;
17252 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17255 test_desc.flags = 0x00000005;
17257 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17260 val = *(((u32 *)&test_desc) + i);
17261 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17262 sram_dma_descs + (i * sizeof(u32)));
17263 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17265 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17268 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17270 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17273 for (i = 0; i < 40; i++) {
17277 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17279 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17280 if ((val & 0xffff) == sram_dma_descs) {
17291 #define TEST_BUFFER_SIZE 0x2000
17293 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17294 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17298 static int tg3_test_dma(struct tg3 *tp)
17300 dma_addr_t buf_dma;
17301 u32 *buf, saved_dma_rwctrl;
17304 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17305 &buf_dma, GFP_KERNEL);
17311 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17312 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17314 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17316 if (tg3_flag(tp, 57765_PLUS))
17319 if (tg3_flag(tp, PCI_EXPRESS)) {
17320 /* DMA read watermark not used on PCIE */
17321 tp->dma_rwctrl |= 0x00180000;
17322 } else if (!tg3_flag(tp, PCIX_MODE)) {
17323 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17324 tg3_asic_rev(tp) == ASIC_REV_5750)
17325 tp->dma_rwctrl |= 0x003f0000;
17327 tp->dma_rwctrl |= 0x003f000f;
17329 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17330 tg3_asic_rev(tp) == ASIC_REV_5704) {
17331 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17332 u32 read_water = 0x7;
17334 /* If the 5704 is behind the EPB bridge, we can
17335 * do the less restrictive ONE_DMA workaround for
17336 * better performance.
17338 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17339 tg3_asic_rev(tp) == ASIC_REV_5704)
17340 tp->dma_rwctrl |= 0x8000;
17341 else if (ccval == 0x6 || ccval == 0x7)
17342 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17344 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17346 /* Set bit 23 to enable PCIX hw bug fix */
17348 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17349 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17351 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17352 /* 5780 always in PCIX mode */
17353 tp->dma_rwctrl |= 0x00144000;
17354 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17355 /* 5714 always in PCIX mode */
17356 tp->dma_rwctrl |= 0x00148000;
17358 tp->dma_rwctrl |= 0x001b000f;
17361 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17362 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17364 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17365 tg3_asic_rev(tp) == ASIC_REV_5704)
17366 tp->dma_rwctrl &= 0xfffffff0;
17368 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17369 tg3_asic_rev(tp) == ASIC_REV_5701) {
17370 /* Remove this if it causes problems for some boards. */
17371 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17373 /* On 5700/5701 chips, we need to set this bit.
17374 * Otherwise the chip will issue cacheline transactions
17375 * to streamable DMA memory with not all the byte
17376 * enables turned on. This is an error on several
17377 * RISC PCI controllers, in particular sparc64.
17379 * On 5703/5704 chips, this bit has been reassigned
17380 * a different meaning. In particular, it is used
17381 * on those chips to enable a PCI-X workaround.
17383 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17386 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17389 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17390 tg3_asic_rev(tp) != ASIC_REV_5701)
17393 /* It is best to perform DMA test with maximum write burst size
17394 * to expose the 5700/5701 write DMA bug.
17396 saved_dma_rwctrl = tp->dma_rwctrl;
17397 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17398 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17403 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17406 /* Send the buffer to the chip. */
17407 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17409 dev_err(&tp->pdev->dev,
17410 "%s: Buffer write failed. err = %d\n",
17415 /* Now read it back. */
17416 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17418 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17419 "err = %d\n", __func__, ret);
17424 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17428 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17429 DMA_RWCTRL_WRITE_BNDRY_16) {
17430 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17431 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17432 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17435 dev_err(&tp->pdev->dev,
17436 "%s: Buffer corrupted on read back! "
17437 "(%d != %d)\n", __func__, p[i], i);
17443 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17449 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17450 DMA_RWCTRL_WRITE_BNDRY_16) {
17451 /* DMA test passed without adjusting DMA boundary,
17452 * now look for chipsets that are known to expose the
17453 * DMA bug without failing the test.
17455 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17456 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17457 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17459 /* Safe to use the calculated DMA boundary. */
17460 tp->dma_rwctrl = saved_dma_rwctrl;
17463 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17467 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17472 static void tg3_init_bufmgr_config(struct tg3 *tp)
17474 if (tg3_flag(tp, 57765_PLUS)) {
17475 tp->bufmgr_config.mbuf_read_dma_low_water =
17476 DEFAULT_MB_RDMA_LOW_WATER_5705;
17477 tp->bufmgr_config.mbuf_mac_rx_low_water =
17478 DEFAULT_MB_MACRX_LOW_WATER_57765;
17479 tp->bufmgr_config.mbuf_high_water =
17480 DEFAULT_MB_HIGH_WATER_57765;
17482 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17483 DEFAULT_MB_RDMA_LOW_WATER_5705;
17484 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17485 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17486 tp->bufmgr_config.mbuf_high_water_jumbo =
17487 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17488 } else if (tg3_flag(tp, 5705_PLUS)) {
17489 tp->bufmgr_config.mbuf_read_dma_low_water =
17490 DEFAULT_MB_RDMA_LOW_WATER_5705;
17491 tp->bufmgr_config.mbuf_mac_rx_low_water =
17492 DEFAULT_MB_MACRX_LOW_WATER_5705;
17493 tp->bufmgr_config.mbuf_high_water =
17494 DEFAULT_MB_HIGH_WATER_5705;
17495 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17496 tp->bufmgr_config.mbuf_mac_rx_low_water =
17497 DEFAULT_MB_MACRX_LOW_WATER_5906;
17498 tp->bufmgr_config.mbuf_high_water =
17499 DEFAULT_MB_HIGH_WATER_5906;
17502 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17503 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17504 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17505 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17506 tp->bufmgr_config.mbuf_high_water_jumbo =
17507 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17509 tp->bufmgr_config.mbuf_read_dma_low_water =
17510 DEFAULT_MB_RDMA_LOW_WATER;
17511 tp->bufmgr_config.mbuf_mac_rx_low_water =
17512 DEFAULT_MB_MACRX_LOW_WATER;
17513 tp->bufmgr_config.mbuf_high_water =
17514 DEFAULT_MB_HIGH_WATER;
17516 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17517 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17518 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17519 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17520 tp->bufmgr_config.mbuf_high_water_jumbo =
17521 DEFAULT_MB_HIGH_WATER_JUMBO;
17524 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17525 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17528 static char *tg3_phy_string(struct tg3 *tp)
17530 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17531 case TG3_PHY_ID_BCM5400: return "5400";
17532 case TG3_PHY_ID_BCM5401: return "5401";
17533 case TG3_PHY_ID_BCM5411: return "5411";
17534 case TG3_PHY_ID_BCM5701: return "5701";
17535 case TG3_PHY_ID_BCM5703: return "5703";
17536 case TG3_PHY_ID_BCM5704: return "5704";
17537 case TG3_PHY_ID_BCM5705: return "5705";
17538 case TG3_PHY_ID_BCM5750: return "5750";
17539 case TG3_PHY_ID_BCM5752: return "5752";
17540 case TG3_PHY_ID_BCM5714: return "5714";
17541 case TG3_PHY_ID_BCM5780: return "5780";
17542 case TG3_PHY_ID_BCM5755: return "5755";
17543 case TG3_PHY_ID_BCM5787: return "5787";
17544 case TG3_PHY_ID_BCM5784: return "5784";
17545 case TG3_PHY_ID_BCM5756: return "5722/5756";
17546 case TG3_PHY_ID_BCM5906: return "5906";
17547 case TG3_PHY_ID_BCM5761: return "5761";
17548 case TG3_PHY_ID_BCM5718C: return "5718C";
17549 case TG3_PHY_ID_BCM5718S: return "5718S";
17550 case TG3_PHY_ID_BCM57765: return "57765";
17551 case TG3_PHY_ID_BCM5719C: return "5719C";
17552 case TG3_PHY_ID_BCM5720C: return "5720C";
17553 case TG3_PHY_ID_BCM5762: return "5762C";
17554 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17555 case 0: return "serdes";
17556 default: return "unknown";
17560 static char *tg3_bus_string(struct tg3 *tp, char *str)
17562 if (tg3_flag(tp, PCI_EXPRESS)) {
17563 strcpy(str, "PCI Express");
17565 } else if (tg3_flag(tp, PCIX_MODE)) {
17566 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17568 strcpy(str, "PCIX:");
17570 if ((clock_ctrl == 7) ||
17571 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17572 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17573 strcat(str, "133MHz");
17574 else if (clock_ctrl == 0)
17575 strcat(str, "33MHz");
17576 else if (clock_ctrl == 2)
17577 strcat(str, "50MHz");
17578 else if (clock_ctrl == 4)
17579 strcat(str, "66MHz");
17580 else if (clock_ctrl == 6)
17581 strcat(str, "100MHz");
17583 strcpy(str, "PCI:");
17584 if (tg3_flag(tp, PCI_HIGH_SPEED))
17585 strcat(str, "66MHz");
17587 strcat(str, "33MHz");
17589 if (tg3_flag(tp, PCI_32BIT))
17590 strcat(str, ":32-bit");
17592 strcat(str, ":64-bit");
17596 static void tg3_init_coal(struct tg3 *tp)
17598 struct ethtool_coalesce *ec = &tp->coal;
17600 memset(ec, 0, sizeof(*ec));
17601 ec->cmd = ETHTOOL_GCOALESCE;
17602 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17603 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17604 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17605 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17606 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17607 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17608 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17609 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17610 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17612 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17613 HOSTCC_MODE_CLRTICK_TXBD)) {
17614 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17615 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17616 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17617 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17620 if (tg3_flag(tp, 5705_PLUS)) {
17621 ec->rx_coalesce_usecs_irq = 0;
17622 ec->tx_coalesce_usecs_irq = 0;
17623 ec->stats_block_coalesce_usecs = 0;
17627 static int tg3_init_one(struct pci_dev *pdev,
17628 const struct pci_device_id *ent)
17630 struct net_device *dev;
17633 u32 sndmbx, rcvmbx, intmbx;
17635 u64 dma_mask, persist_dma_mask;
17636 netdev_features_t features = 0;
17638 printk_once(KERN_INFO "%s\n", version);
17640 err = pci_enable_device(pdev);
17642 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17646 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17648 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17649 goto err_out_disable_pdev;
17652 pci_set_master(pdev);
17654 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17657 goto err_out_free_res;
17660 SET_NETDEV_DEV(dev, &pdev->dev);
17662 tp = netdev_priv(dev);
17665 tp->rx_mode = TG3_DEF_RX_MODE;
17666 tp->tx_mode = TG3_DEF_TX_MODE;
17668 tp->pcierr_recovery = false;
17671 tp->msg_enable = tg3_debug;
17673 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17675 if (pdev_is_ssb_gige_core(pdev)) {
17676 tg3_flag_set(tp, IS_SSB_CORE);
17677 if (ssb_gige_must_flush_posted_writes(pdev))
17678 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17679 if (ssb_gige_one_dma_at_once(pdev))
17680 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17681 if (ssb_gige_have_roboswitch(pdev)) {
17682 tg3_flag_set(tp, USE_PHYLIB);
17683 tg3_flag_set(tp, ROBOSWITCH);
17685 if (ssb_gige_is_rgmii(pdev))
17686 tg3_flag_set(tp, RGMII_MODE);
17689 /* The word/byte swap controls here control register access byte
17690 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17693 tp->misc_host_ctrl =
17694 MISC_HOST_CTRL_MASK_PCI_INT |
17695 MISC_HOST_CTRL_WORD_SWAP |
17696 MISC_HOST_CTRL_INDIR_ACCESS |
17697 MISC_HOST_CTRL_PCISTATE_RW;
17699 /* The NONFRM (non-frame) byte/word swap controls take effect
17700 * on descriptor entries, anything which isn't packet data.
17702 * The StrongARM chips on the board (one for tx, one for rx)
17703 * are running in big-endian mode.
17705 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17706 GRC_MODE_WSWAP_NONFRM_DATA);
17707 #ifdef __BIG_ENDIAN
17708 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17710 spin_lock_init(&tp->lock);
17711 spin_lock_init(&tp->indirect_lock);
17712 INIT_WORK(&tp->reset_task, tg3_reset_task);
17714 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17716 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17718 goto err_out_free_dev;
17721 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17722 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17723 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17724 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17725 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17726 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17727 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17728 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17729 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17730 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17731 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17732 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17733 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17734 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17735 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17736 tg3_flag_set(tp, ENABLE_APE);
17737 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17738 if (!tp->aperegs) {
17739 dev_err(&pdev->dev,
17740 "Cannot map APE registers, aborting\n");
17742 goto err_out_iounmap;
17746 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17747 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17749 dev->ethtool_ops = &tg3_ethtool_ops;
17750 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17751 dev->netdev_ops = &tg3_netdev_ops;
17752 dev->irq = pdev->irq;
17754 err = tg3_get_invariants(tp, ent);
17756 dev_err(&pdev->dev,
17757 "Problem fetching invariants of chip, aborting\n");
17758 goto err_out_apeunmap;
17761 /* The EPB bridge inside 5714, 5715, and 5780 and any
17762 * device behind the EPB cannot support DMA addresses > 40-bit.
17763 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17764 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17765 * do DMA address check in tg3_start_xmit().
17767 if (tg3_flag(tp, IS_5788))
17768 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17769 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17770 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17771 #ifdef CONFIG_HIGHMEM
17772 dma_mask = DMA_BIT_MASK(64);
17775 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17777 /* Configure DMA attributes. */
17778 if (dma_mask > DMA_BIT_MASK(32)) {
17779 err = pci_set_dma_mask(pdev, dma_mask);
17781 features |= NETIF_F_HIGHDMA;
17782 err = pci_set_consistent_dma_mask(pdev,
17785 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17786 "DMA for consistent allocations\n");
17787 goto err_out_apeunmap;
17791 if (err || dma_mask == DMA_BIT_MASK(32)) {
17792 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17794 dev_err(&pdev->dev,
17795 "No usable DMA configuration, aborting\n");
17796 goto err_out_apeunmap;
17800 tg3_init_bufmgr_config(tp);
17802 /* 5700 B0 chips do not support checksumming correctly due
17803 * to hardware bugs.
17805 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17806 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17808 if (tg3_flag(tp, 5755_PLUS))
17809 features |= NETIF_F_IPV6_CSUM;
17812 /* TSO is on by default on chips that support hardware TSO.
17813 * Firmware TSO on older chips gives lower performance, so it
17814 * is off by default, but can be enabled using ethtool.
17816 if ((tg3_flag(tp, HW_TSO_1) ||
17817 tg3_flag(tp, HW_TSO_2) ||
17818 tg3_flag(tp, HW_TSO_3)) &&
17819 (features & NETIF_F_IP_CSUM))
17820 features |= NETIF_F_TSO;
17821 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17822 if (features & NETIF_F_IPV6_CSUM)
17823 features |= NETIF_F_TSO6;
17824 if (tg3_flag(tp, HW_TSO_3) ||
17825 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17826 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17827 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17828 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17829 tg3_asic_rev(tp) == ASIC_REV_57780)
17830 features |= NETIF_F_TSO_ECN;
17833 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17834 NETIF_F_HW_VLAN_CTAG_RX;
17835 dev->vlan_features |= features;
17838 * Add loopback capability only for a subset of devices that support
17839 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17840 * loopback for the remaining devices.
17842 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17843 !tg3_flag(tp, CPMU_PRESENT))
17844 /* Add the loopback capability */
17845 features |= NETIF_F_LOOPBACK;
17847 dev->hw_features |= features;
17848 dev->priv_flags |= IFF_UNICAST_FLT;
17850 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17851 dev->min_mtu = TG3_MIN_MTU;
17852 dev->max_mtu = TG3_MAX_MTU(tp);
17854 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17855 !tg3_flag(tp, TSO_CAPABLE) &&
17856 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17857 tg3_flag_set(tp, MAX_RXPEND_64);
17858 tp->rx_pending = 63;
17861 err = tg3_get_device_address(tp);
17863 dev_err(&pdev->dev,
17864 "Could not obtain valid ethernet address, aborting\n");
17865 goto err_out_apeunmap;
17868 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17869 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17870 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17871 for (i = 0; i < tp->irq_max; i++) {
17872 struct tg3_napi *tnapi = &tp->napi[i];
17875 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17877 tnapi->int_mbox = intmbx;
17883 tnapi->consmbox = rcvmbx;
17884 tnapi->prodmbox = sndmbx;
17887 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17889 tnapi->coal_now = HOSTCC_MODE_NOW;
17891 if (!tg3_flag(tp, SUPPORT_MSIX))
17895 * If we support MSIX, we'll be using RSS. If we're using
17896 * RSS, the first vector only handles link interrupts and the
17897 * remaining vectors handle rx and tx interrupts. Reuse the
17898 * mailbox values for the next iteration. The values we setup
17899 * above are still useful for the single vectored mode.
17913 * Reset chip in case UNDI or EFI driver did not shutdown
17914 * DMA self test will enable WDMAC and we'll see (spurious)
17915 * pending DMA on the PCI bus at that point.
17917 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17918 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17919 tg3_full_lock(tp, 0);
17920 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17921 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17922 tg3_full_unlock(tp);
17925 err = tg3_test_dma(tp);
17927 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17928 goto err_out_apeunmap;
17933 pci_set_drvdata(pdev, dev);
17935 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17936 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17937 tg3_asic_rev(tp) == ASIC_REV_5762)
17938 tg3_flag_set(tp, PTP_CAPABLE);
17940 tg3_timer_init(tp);
17942 tg3_carrier_off(tp);
17944 err = register_netdev(dev);
17946 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17947 goto err_out_apeunmap;
17950 if (tg3_flag(tp, PTP_CAPABLE)) {
17952 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17954 if (IS_ERR(tp->ptp_clock))
17955 tp->ptp_clock = NULL;
17958 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17959 tp->board_part_number,
17960 tg3_chip_rev_id(tp),
17961 tg3_bus_string(tp, str),
17964 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17967 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17968 ethtype = "10/100Base-TX";
17969 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17970 ethtype = "1000Base-SX";
17972 ethtype = "10/100/1000Base-T";
17974 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17975 "(WireSpeed[%d], EEE[%d])\n",
17976 tg3_phy_string(tp), ethtype,
17977 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17978 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17981 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17982 (dev->features & NETIF_F_RXCSUM) != 0,
17983 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17984 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17985 tg3_flag(tp, ENABLE_ASF) != 0,
17986 tg3_flag(tp, TSO_CAPABLE) != 0);
17987 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17989 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17990 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17992 pci_save_state(pdev);
17998 iounmap(tp->aperegs);
17999 tp->aperegs = NULL;
18012 pci_release_regions(pdev);
18014 err_out_disable_pdev:
18015 if (pci_is_enabled(pdev))
18016 pci_disable_device(pdev);
18020 static void tg3_remove_one(struct pci_dev *pdev)
18022 struct net_device *dev = pci_get_drvdata(pdev);
18025 struct tg3 *tp = netdev_priv(dev);
18029 release_firmware(tp->fw);
18031 tg3_reset_task_cancel(tp);
18033 if (tg3_flag(tp, USE_PHYLIB)) {
18038 unregister_netdev(dev);
18040 iounmap(tp->aperegs);
18041 tp->aperegs = NULL;
18048 pci_release_regions(pdev);
18049 pci_disable_device(pdev);
18053 #ifdef CONFIG_PM_SLEEP
18054 static int tg3_suspend(struct device *device)
18056 struct pci_dev *pdev = to_pci_dev(device);
18057 struct net_device *dev = pci_get_drvdata(pdev);
18058 struct tg3 *tp = netdev_priv(dev);
18063 if (!netif_running(dev))
18066 tg3_reset_task_cancel(tp);
18068 tg3_netif_stop(tp);
18070 tg3_timer_stop(tp);
18072 tg3_full_lock(tp, 1);
18073 tg3_disable_ints(tp);
18074 tg3_full_unlock(tp);
18076 netif_device_detach(dev);
18078 tg3_full_lock(tp, 0);
18079 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18080 tg3_flag_clear(tp, INIT_COMPLETE);
18081 tg3_full_unlock(tp);
18083 err = tg3_power_down_prepare(tp);
18087 tg3_full_lock(tp, 0);
18089 tg3_flag_set(tp, INIT_COMPLETE);
18090 err2 = tg3_restart_hw(tp, true);
18094 tg3_timer_start(tp);
18096 netif_device_attach(dev);
18097 tg3_netif_start(tp);
18100 tg3_full_unlock(tp);
18111 static int tg3_resume(struct device *device)
18113 struct pci_dev *pdev = to_pci_dev(device);
18114 struct net_device *dev = pci_get_drvdata(pdev);
18115 struct tg3 *tp = netdev_priv(dev);
18120 if (!netif_running(dev))
18123 netif_device_attach(dev);
18125 tg3_full_lock(tp, 0);
18127 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18129 tg3_flag_set(tp, INIT_COMPLETE);
18130 err = tg3_restart_hw(tp,
18131 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18135 tg3_timer_start(tp);
18137 tg3_netif_start(tp);
18140 tg3_full_unlock(tp);
18149 #endif /* CONFIG_PM_SLEEP */
18151 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18153 static void tg3_shutdown(struct pci_dev *pdev)
18155 struct net_device *dev = pci_get_drvdata(pdev);
18156 struct tg3 *tp = netdev_priv(dev);
18159 netif_device_detach(dev);
18161 if (netif_running(dev))
18164 if (system_state == SYSTEM_POWER_OFF)
18165 tg3_power_down(tp);
18171 * tg3_io_error_detected - called when PCI error is detected
18172 * @pdev: Pointer to PCI device
18173 * @state: The current pci connection state
18175 * This function is called after a PCI bus error affecting
18176 * this device has been detected.
18178 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18179 pci_channel_state_t state)
18181 struct net_device *netdev = pci_get_drvdata(pdev);
18182 struct tg3 *tp = netdev_priv(netdev);
18183 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18185 netdev_info(netdev, "PCI I/O error detected\n");
18189 /* We probably don't have netdev yet */
18190 if (!netdev || !netif_running(netdev))
18193 /* We needn't recover from permanent error */
18194 if (state == pci_channel_io_frozen)
18195 tp->pcierr_recovery = true;
18199 tg3_netif_stop(tp);
18201 tg3_timer_stop(tp);
18203 /* Want to make sure that the reset task doesn't run */
18204 tg3_reset_task_cancel(tp);
18206 netif_device_detach(netdev);
18208 /* Clean up software state, even if MMIO is blocked */
18209 tg3_full_lock(tp, 0);
18210 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18211 tg3_full_unlock(tp);
18214 if (state == pci_channel_io_perm_failure) {
18216 tg3_napi_enable(tp);
18219 err = PCI_ERS_RESULT_DISCONNECT;
18221 pci_disable_device(pdev);
18230 * tg3_io_slot_reset - called after the pci bus has been reset.
18231 * @pdev: Pointer to PCI device
18233 * Restart the card from scratch, as if from a cold-boot.
18234 * At this point, the card has exprienced a hard reset,
18235 * followed by fixups by BIOS, and has its config space
18236 * set up identically to what it was at cold boot.
18238 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18240 struct net_device *netdev = pci_get_drvdata(pdev);
18241 struct tg3 *tp = netdev_priv(netdev);
18242 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18247 if (pci_enable_device(pdev)) {
18248 dev_err(&pdev->dev,
18249 "Cannot re-enable PCI device after reset.\n");
18253 pci_set_master(pdev);
18254 pci_restore_state(pdev);
18255 pci_save_state(pdev);
18257 if (!netdev || !netif_running(netdev)) {
18258 rc = PCI_ERS_RESULT_RECOVERED;
18262 err = tg3_power_up(tp);
18266 rc = PCI_ERS_RESULT_RECOVERED;
18269 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18270 tg3_napi_enable(tp);
18279 * tg3_io_resume - called when traffic can start flowing again.
18280 * @pdev: Pointer to PCI device
18282 * This callback is called when the error recovery driver tells
18283 * us that its OK to resume normal operation.
18285 static void tg3_io_resume(struct pci_dev *pdev)
18287 struct net_device *netdev = pci_get_drvdata(pdev);
18288 struct tg3 *tp = netdev_priv(netdev);
18293 if (!netdev || !netif_running(netdev))
18296 tg3_full_lock(tp, 0);
18297 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18298 tg3_flag_set(tp, INIT_COMPLETE);
18299 err = tg3_restart_hw(tp, true);
18301 tg3_full_unlock(tp);
18302 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18306 netif_device_attach(netdev);
18308 tg3_timer_start(tp);
18310 tg3_netif_start(tp);
18312 tg3_full_unlock(tp);
18317 tp->pcierr_recovery = false;
18321 static const struct pci_error_handlers tg3_err_handler = {
18322 .error_detected = tg3_io_error_detected,
18323 .slot_reset = tg3_io_slot_reset,
18324 .resume = tg3_io_resume
18327 static struct pci_driver tg3_driver = {
18328 .name = DRV_MODULE_NAME,
18329 .id_table = tg3_pci_tbl,
18330 .probe = tg3_init_one,
18331 .remove = tg3_remove_one,
18332 .err_handler = &tg3_err_handler,
18333 .driver.pm = &tg3_pm_ops,
18334 .shutdown = tg3_shutdown,
18337 module_pci_driver(tg3_driver);