2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2013 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
51 #include <net/checksum.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
62 #include <asm/idprom.h>
71 /* Functions & macros to verify TG3_FLAGS types */
73 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
75 return test_bit(flag, bits);
78 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
83 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
85 clear_bit(flag, bits);
88 #define tg3_flag(tp, flag) \
89 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag) \
91 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag) \
93 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
95 #define DRV_MODULE_NAME "tg3"
97 #define TG3_MIN_NUM 132
98 #define DRV_MODULE_VERSION \
99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE "May 21, 2013"
102 #define RESET_KIND_SHUTDOWN 0
103 #define RESET_KIND_INIT 1
104 #define RESET_KIND_SUSPEND 2
106 #define TG3_DEF_RX_MODE 0
107 #define TG3_DEF_TX_MODE 0
108 #define TG3_DEF_MSG_ENABLE \
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
120 /* length of time before we decide the hardware is borked,
121 * and dev->tx_timeout() should be called to fix the problem
124 #define TG3_TX_TIMEOUT (5 * HZ)
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU 60
128 #define TG3_MAX_MTU(tp) \
129 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132 * You can't change the ring sizes, but you can change where you place
133 * them in the NIC onboard memory.
135 #define TG3_RX_STD_RING_SIZE(tp) \
136 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING 200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
144 /* Do not place this n-ring entries value into the tp struct itself,
145 * we really want to expose these constants to GCC so that modulo et
146 * al. operations are done with shifts and masks instead of with
147 * hw multiply/modulo instructions. Another solution would be to
148 * replace things like '% foo' with '& (foo - 1)'.
151 #define TG3_TX_RING_SIZE 512
152 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
154 #define TG3_RX_STD_RING_BYTES(tp) \
155 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
162 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
164 #define TG3_DMA_BYTE_ENAB 64
166 #define TG3_RX_STD_DMA_SZ 1536
167 #define TG3_RX_JMB_DMA_SZ 9046
169 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
171 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181 * that are at least dword aligned when used in PCIX mode. The driver
182 * works around this bug by double copying the packet. This workaround
183 * is built into the normal double copy length check for efficiency.
185 * However, the double copy is only necessary on those architectures
186 * where unaligned memory accesses are inefficient. For those architectures
187 * where unaligned memory accesses incur little penalty, we can reintegrate
188 * the 5701 in the normal rx path. Doing so saves a device structure
189 * dereference by hardcoding the double copy threshold in place.
191 #define TG3_RX_COPY_THRESHOLD 256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
195 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
201 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K 2048
207 #define TG3_TX_BD_DMA_MAX_4K 4096
209 #define TG3_RAW_IP_ALIGN 2
211 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
212 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
214 #define FIRMWARE_TG3 "tigon/tg3.bin"
215 #define FIRMWARE_TG357766 "tigon/tg357766.bin"
216 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
217 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
219 static char version[] =
220 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
230 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258 TG3_DRV_DATA_FLAG_5705_10_100},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 TG3_DRV_DATA_FLAG_5705_10_100},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265 TG3_DRV_DATA_FLAG_5705_10_100},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287 PCI_VENDOR_ID_LENOVO,
288 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
353 static const struct {
354 const char string[ETH_GSTRING_LEN];
355 } ethtool_stats_keys[] = {
358 { "rx_ucast_packets" },
359 { "rx_mcast_packets" },
360 { "rx_bcast_packets" },
362 { "rx_align_errors" },
363 { "rx_xon_pause_rcvd" },
364 { "rx_xoff_pause_rcvd" },
365 { "rx_mac_ctrl_rcvd" },
366 { "rx_xoff_entered" },
367 { "rx_frame_too_long_errors" },
369 { "rx_undersize_packets" },
370 { "rx_in_length_errors" },
371 { "rx_out_length_errors" },
372 { "rx_64_or_less_octet_packets" },
373 { "rx_65_to_127_octet_packets" },
374 { "rx_128_to_255_octet_packets" },
375 { "rx_256_to_511_octet_packets" },
376 { "rx_512_to_1023_octet_packets" },
377 { "rx_1024_to_1522_octet_packets" },
378 { "rx_1523_to_2047_octet_packets" },
379 { "rx_2048_to_4095_octet_packets" },
380 { "rx_4096_to_8191_octet_packets" },
381 { "rx_8192_to_9022_octet_packets" },
388 { "tx_flow_control" },
390 { "tx_single_collisions" },
391 { "tx_mult_collisions" },
393 { "tx_excessive_collisions" },
394 { "tx_late_collisions" },
395 { "tx_collide_2times" },
396 { "tx_collide_3times" },
397 { "tx_collide_4times" },
398 { "tx_collide_5times" },
399 { "tx_collide_6times" },
400 { "tx_collide_7times" },
401 { "tx_collide_8times" },
402 { "tx_collide_9times" },
403 { "tx_collide_10times" },
404 { "tx_collide_11times" },
405 { "tx_collide_12times" },
406 { "tx_collide_13times" },
407 { "tx_collide_14times" },
408 { "tx_collide_15times" },
409 { "tx_ucast_packets" },
410 { "tx_mcast_packets" },
411 { "tx_bcast_packets" },
412 { "tx_carrier_sense_errors" },
416 { "dma_writeq_full" },
417 { "dma_write_prioq_full" },
421 { "rx_threshold_hit" },
423 { "dma_readq_full" },
424 { "dma_read_prioq_full" },
425 { "tx_comp_queue_full" },
427 { "ring_set_send_prod_index" },
428 { "ring_status_update" },
430 { "nic_avoided_irqs" },
431 { "nic_tx_threshold_hit" },
433 { "mbuf_lwm_thresh_hit" },
436 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST 0
438 #define TG3_LINK_TEST 1
439 #define TG3_REGISTER_TEST 2
440 #define TG3_MEMORY_TEST 3
441 #define TG3_MAC_LOOPB_TEST 4
442 #define TG3_PHY_LOOPB_TEST 5
443 #define TG3_EXT_LOOPB_TEST 6
444 #define TG3_INTERRUPT_TEST 7
447 static const struct {
448 const char string[ETH_GSTRING_LEN];
449 } ethtool_test_keys[] = {
450 [TG3_NVRAM_TEST] = { "nvram test (online) " },
451 [TG3_LINK_TEST] = { "link test (online) " },
452 [TG3_REGISTER_TEST] = { "register test (offline)" },
453 [TG3_MEMORY_TEST] = { "memory test (offline)" },
454 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
455 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
456 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
457 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
460 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
463 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
465 writel(val, tp->regs + off);
468 static u32 tg3_read32(struct tg3 *tp, u32 off)
470 return readl(tp->regs + off);
473 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
475 writel(val, tp->aperegs + off);
478 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
480 return readl(tp->aperegs + off);
483 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
487 spin_lock_irqsave(&tp->indirect_lock, flags);
488 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
489 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
490 spin_unlock_irqrestore(&tp->indirect_lock, flags);
493 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
495 writel(val, tp->regs + off);
496 readl(tp->regs + off);
499 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
504 spin_lock_irqsave(&tp->indirect_lock, flags);
505 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
506 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
507 spin_unlock_irqrestore(&tp->indirect_lock, flags);
511 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
515 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
516 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
517 TG3_64BIT_REG_LOW, val);
520 if (off == TG3_RX_STD_PROD_IDX_REG) {
521 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
522 TG3_64BIT_REG_LOW, val);
526 spin_lock_irqsave(&tp->indirect_lock, flags);
527 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
528 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
529 spin_unlock_irqrestore(&tp->indirect_lock, flags);
531 /* In indirect mode when disabling interrupts, we also need
532 * to clear the interrupt bit in the GRC local ctrl register.
534 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
536 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
537 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
541 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
546 spin_lock_irqsave(&tp->indirect_lock, flags);
547 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
548 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
549 spin_unlock_irqrestore(&tp->indirect_lock, flags);
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554 * where it is unsafe to read back the register without some delay.
555 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
556 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
558 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
560 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
561 /* Non-posted methods */
562 tp->write32(tp, off, val);
565 tg3_write32(tp, off, val);
570 /* Wait again after the read for the posted method to guarantee that
571 * the wait time is met.
577 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
579 tp->write32_mbox(tp, off, val);
580 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
581 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
582 !tg3_flag(tp, ICH_WORKAROUND)))
583 tp->read32_mbox(tp, off);
586 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
588 void __iomem *mbox = tp->regs + off;
590 if (tg3_flag(tp, TXD_MBOX_HWBUG))
592 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
593 tg3_flag(tp, FLUSH_POSTED_WRITES))
597 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
599 return readl(tp->regs + off + GRCMBOX_BASE);
602 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
604 writel(val, tp->regs + off + GRCMBOX_BASE);
607 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
608 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
609 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
610 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
611 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
613 #define tw32(reg, val) tp->write32(tp, reg, val)
614 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
615 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
616 #define tr32(reg) tp->read32(tp, reg)
618 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
622 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
623 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
626 spin_lock_irqsave(&tp->indirect_lock, flags);
627 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
628 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
629 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
631 /* Always leave this as zero. */
632 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
634 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
635 tw32_f(TG3PCI_MEM_WIN_DATA, val);
637 /* Always leave this as zero. */
638 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
640 spin_unlock_irqrestore(&tp->indirect_lock, flags);
643 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
647 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
648 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
653 spin_lock_irqsave(&tp->indirect_lock, flags);
654 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
655 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
656 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
658 /* Always leave this as zero. */
659 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
661 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
662 *val = tr32(TG3PCI_MEM_WIN_DATA);
664 /* Always leave this as zero. */
665 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
667 spin_unlock_irqrestore(&tp->indirect_lock, flags);
670 static void tg3_ape_lock_init(struct tg3 *tp)
675 if (tg3_asic_rev(tp) == ASIC_REV_5761)
676 regbase = TG3_APE_LOCK_GRANT;
678 regbase = TG3_APE_PER_LOCK_GRANT;
680 /* Make sure the driver hasn't any stale locks. */
681 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
683 case TG3_APE_LOCK_PHY0:
684 case TG3_APE_LOCK_PHY1:
685 case TG3_APE_LOCK_PHY2:
686 case TG3_APE_LOCK_PHY3:
687 bit = APE_LOCK_GRANT_DRIVER;
691 bit = APE_LOCK_GRANT_DRIVER;
693 bit = 1 << tp->pci_fn;
695 tg3_ape_write32(tp, regbase + 4 * i, bit);
700 static int tg3_ape_lock(struct tg3 *tp, int locknum)
704 u32 status, req, gnt, bit;
706 if (!tg3_flag(tp, ENABLE_APE))
710 case TG3_APE_LOCK_GPIO:
711 if (tg3_asic_rev(tp) == ASIC_REV_5761)
713 case TG3_APE_LOCK_GRC:
714 case TG3_APE_LOCK_MEM:
716 bit = APE_LOCK_REQ_DRIVER;
718 bit = 1 << tp->pci_fn;
720 case TG3_APE_LOCK_PHY0:
721 case TG3_APE_LOCK_PHY1:
722 case TG3_APE_LOCK_PHY2:
723 case TG3_APE_LOCK_PHY3:
724 bit = APE_LOCK_REQ_DRIVER;
730 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
731 req = TG3_APE_LOCK_REQ;
732 gnt = TG3_APE_LOCK_GRANT;
734 req = TG3_APE_PER_LOCK_REQ;
735 gnt = TG3_APE_PER_LOCK_GRANT;
740 tg3_ape_write32(tp, req + off, bit);
742 /* Wait for up to 1 millisecond to acquire lock. */
743 for (i = 0; i < 100; i++) {
744 status = tg3_ape_read32(tp, gnt + off);
751 /* Revoke the lock request. */
752 tg3_ape_write32(tp, gnt + off, bit);
759 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
763 if (!tg3_flag(tp, ENABLE_APE))
767 case TG3_APE_LOCK_GPIO:
768 if (tg3_asic_rev(tp) == ASIC_REV_5761)
770 case TG3_APE_LOCK_GRC:
771 case TG3_APE_LOCK_MEM:
773 bit = APE_LOCK_GRANT_DRIVER;
775 bit = 1 << tp->pci_fn;
777 case TG3_APE_LOCK_PHY0:
778 case TG3_APE_LOCK_PHY1:
779 case TG3_APE_LOCK_PHY2:
780 case TG3_APE_LOCK_PHY3:
781 bit = APE_LOCK_GRANT_DRIVER;
787 if (tg3_asic_rev(tp) == ASIC_REV_5761)
788 gnt = TG3_APE_LOCK_GRANT;
790 gnt = TG3_APE_PER_LOCK_GRANT;
792 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
795 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
800 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
803 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
804 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
807 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
810 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
813 return timeout_us ? 0 : -EBUSY;
816 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
820 for (i = 0; i < timeout_us / 10; i++) {
821 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
823 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
829 return i == timeout_us / 10;
832 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
836 u32 i, bufoff, msgoff, maxlen, apedata;
838 if (!tg3_flag(tp, APE_HAS_NCSI))
841 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
842 if (apedata != APE_SEG_SIG_MAGIC)
845 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
846 if (!(apedata & APE_FW_STATUS_READY))
849 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
851 msgoff = bufoff + 2 * sizeof(u32);
852 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
857 /* Cap xfer sizes to scratchpad limits. */
858 length = (len > maxlen) ? maxlen : len;
861 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
862 if (!(apedata & APE_FW_STATUS_READY))
865 /* Wait for up to 1 msec for APE to service previous event. */
866 err = tg3_ape_event_lock(tp, 1000);
870 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
871 APE_EVENT_STATUS_SCRTCHPD_READ |
872 APE_EVENT_STATUS_EVENT_PENDING;
873 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
875 tg3_ape_write32(tp, bufoff, base_off);
876 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
878 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
879 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
883 if (tg3_ape_wait_for_event(tp, 30000))
886 for (i = 0; length; i += 4, length -= 4) {
887 u32 val = tg3_ape_read32(tp, msgoff + i);
888 memcpy(data, &val, sizeof(u32));
896 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
901 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
902 if (apedata != APE_SEG_SIG_MAGIC)
905 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
906 if (!(apedata & APE_FW_STATUS_READY))
909 /* Wait for up to 1 millisecond for APE to service previous event. */
910 err = tg3_ape_event_lock(tp, 1000);
914 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
915 event | APE_EVENT_STATUS_EVENT_PENDING);
917 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
918 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
923 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
928 if (!tg3_flag(tp, ENABLE_APE))
932 case RESET_KIND_INIT:
933 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
934 APE_HOST_SEG_SIG_MAGIC);
935 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
936 APE_HOST_SEG_LEN_MAGIC);
937 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
938 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
939 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
940 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
941 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
942 APE_HOST_BEHAV_NO_PHYLOCK);
943 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
944 TG3_APE_HOST_DRVR_STATE_START);
946 event = APE_EVENT_STATUS_STATE_START;
948 case RESET_KIND_SHUTDOWN:
949 /* With the interface we are currently using,
950 * APE does not track driver state. Wiping
951 * out the HOST SEGMENT SIGNATURE forces
952 * the APE to assume OS absent status.
954 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
956 if (device_may_wakeup(&tp->pdev->dev) &&
957 tg3_flag(tp, WOL_ENABLE)) {
958 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
959 TG3_APE_HOST_WOL_SPEED_AUTO);
960 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
962 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
964 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
966 event = APE_EVENT_STATUS_STATE_UNLOAD;
972 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
974 tg3_ape_send_event(tp, event);
977 static void tg3_disable_ints(struct tg3 *tp)
981 tw32(TG3PCI_MISC_HOST_CTRL,
982 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
983 for (i = 0; i < tp->irq_max; i++)
984 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
987 static void tg3_enable_ints(struct tg3 *tp)
994 tw32(TG3PCI_MISC_HOST_CTRL,
995 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
997 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
998 for (i = 0; i < tp->irq_cnt; i++) {
999 struct tg3_napi *tnapi = &tp->napi[i];
1001 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1002 if (tg3_flag(tp, 1SHOT_MSI))
1003 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1005 tp->coal_now |= tnapi->coal_now;
1008 /* Force an initial interrupt */
1009 if (!tg3_flag(tp, TAGGED_STATUS) &&
1010 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1011 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1013 tw32(HOSTCC_MODE, tp->coal_now);
1015 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1018 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1020 struct tg3 *tp = tnapi->tp;
1021 struct tg3_hw_status *sblk = tnapi->hw_status;
1022 unsigned int work_exists = 0;
1024 /* check for phy events */
1025 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1026 if (sblk->status & SD_STATUS_LINK_CHG)
1030 /* check for TX work to do */
1031 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1034 /* check for RX work to do */
1035 if (tnapi->rx_rcb_prod_idx &&
1036 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1043 * similar to tg3_enable_ints, but it accurately determines whether there
1044 * is new work pending and can return without flushing the PIO write
1045 * which reenables interrupts
1047 static void tg3_int_reenable(struct tg3_napi *tnapi)
1049 struct tg3 *tp = tnapi->tp;
1051 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1054 /* When doing tagged status, this work check is unnecessary.
1055 * The last_tag we write above tells the chip which piece of
1056 * work we've completed.
1058 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1059 tw32(HOSTCC_MODE, tp->coalesce_mode |
1060 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1063 static void tg3_switch_clocks(struct tg3 *tp)
1066 u32 orig_clock_ctrl;
1068 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1071 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1073 orig_clock_ctrl = clock_ctrl;
1074 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1075 CLOCK_CTRL_CLKRUN_OENABLE |
1077 tp->pci_clock_ctrl = clock_ctrl;
1079 if (tg3_flag(tp, 5705_PLUS)) {
1080 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1081 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1082 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1084 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1085 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1087 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1089 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1090 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1093 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1096 #define PHY_BUSY_LOOPS 5000
1098 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1105 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1107 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1111 tg3_ape_lock(tp, tp->phy_ape_lock);
1115 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1116 MI_COM_PHY_ADDR_MASK);
1117 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1118 MI_COM_REG_ADDR_MASK);
1119 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1121 tw32_f(MAC_MI_COM, frame_val);
1123 loops = PHY_BUSY_LOOPS;
1124 while (loops != 0) {
1126 frame_val = tr32(MAC_MI_COM);
1128 if ((frame_val & MI_COM_BUSY) == 0) {
1130 frame_val = tr32(MAC_MI_COM);
1138 *val = frame_val & MI_COM_DATA_MASK;
1142 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1143 tw32_f(MAC_MI_MODE, tp->mi_mode);
1147 tg3_ape_unlock(tp, tp->phy_ape_lock);
1152 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1154 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1157 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1164 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1165 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1168 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1170 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1174 tg3_ape_lock(tp, tp->phy_ape_lock);
1176 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1177 MI_COM_PHY_ADDR_MASK);
1178 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1179 MI_COM_REG_ADDR_MASK);
1180 frame_val |= (val & MI_COM_DATA_MASK);
1181 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1183 tw32_f(MAC_MI_COM, frame_val);
1185 loops = PHY_BUSY_LOOPS;
1186 while (loops != 0) {
1188 frame_val = tr32(MAC_MI_COM);
1189 if ((frame_val & MI_COM_BUSY) == 0) {
1191 frame_val = tr32(MAC_MI_COM);
1201 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1202 tw32_f(MAC_MI_MODE, tp->mi_mode);
1206 tg3_ape_unlock(tp, tp->phy_ape_lock);
1211 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1213 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1216 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1220 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1224 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1228 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1229 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1233 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1239 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1243 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1247 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1251 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1252 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1256 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1262 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1266 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1268 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1273 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1277 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1279 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1284 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1288 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1289 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1290 MII_TG3_AUXCTL_SHDWSEL_MISC);
1292 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1297 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1299 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1300 set |= MII_TG3_AUXCTL_MISC_WREN;
1302 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1305 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1310 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1316 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1318 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1320 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1321 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1326 static int tg3_bmcr_reset(struct tg3 *tp)
1331 /* OK, reset it, and poll the BMCR_RESET bit until it
1332 * clears or we time out.
1334 phy_control = BMCR_RESET;
1335 err = tg3_writephy(tp, MII_BMCR, phy_control);
1341 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1345 if ((phy_control & BMCR_RESET) == 0) {
1357 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1359 struct tg3 *tp = bp->priv;
1362 spin_lock_bh(&tp->lock);
1364 if (tg3_readphy(tp, reg, &val))
1367 spin_unlock_bh(&tp->lock);
1372 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1374 struct tg3 *tp = bp->priv;
1377 spin_lock_bh(&tp->lock);
1379 if (tg3_writephy(tp, reg, val))
1382 spin_unlock_bh(&tp->lock);
1387 static int tg3_mdio_reset(struct mii_bus *bp)
1392 static void tg3_mdio_config_5785(struct tg3 *tp)
1395 struct phy_device *phydev;
1397 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1398 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1399 case PHY_ID_BCM50610:
1400 case PHY_ID_BCM50610M:
1401 val = MAC_PHYCFG2_50610_LED_MODES;
1403 case PHY_ID_BCMAC131:
1404 val = MAC_PHYCFG2_AC131_LED_MODES;
1406 case PHY_ID_RTL8211C:
1407 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1409 case PHY_ID_RTL8201E:
1410 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1416 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1417 tw32(MAC_PHYCFG2, val);
1419 val = tr32(MAC_PHYCFG1);
1420 val &= ~(MAC_PHYCFG1_RGMII_INT |
1421 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1422 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1423 tw32(MAC_PHYCFG1, val);
1428 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1429 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1430 MAC_PHYCFG2_FMODE_MASK_MASK |
1431 MAC_PHYCFG2_GMODE_MASK_MASK |
1432 MAC_PHYCFG2_ACT_MASK_MASK |
1433 MAC_PHYCFG2_QUAL_MASK_MASK |
1434 MAC_PHYCFG2_INBAND_ENABLE;
1436 tw32(MAC_PHYCFG2, val);
1438 val = tr32(MAC_PHYCFG1);
1439 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1440 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1441 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1442 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1443 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1444 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1445 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1447 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1448 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1449 tw32(MAC_PHYCFG1, val);
1451 val = tr32(MAC_EXT_RGMII_MODE);
1452 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1453 MAC_RGMII_MODE_RX_QUALITY |
1454 MAC_RGMII_MODE_RX_ACTIVITY |
1455 MAC_RGMII_MODE_RX_ENG_DET |
1456 MAC_RGMII_MODE_TX_ENABLE |
1457 MAC_RGMII_MODE_TX_LOWPWR |
1458 MAC_RGMII_MODE_TX_RESET);
1459 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1460 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1461 val |= MAC_RGMII_MODE_RX_INT_B |
1462 MAC_RGMII_MODE_RX_QUALITY |
1463 MAC_RGMII_MODE_RX_ACTIVITY |
1464 MAC_RGMII_MODE_RX_ENG_DET;
1465 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1466 val |= MAC_RGMII_MODE_TX_ENABLE |
1467 MAC_RGMII_MODE_TX_LOWPWR |
1468 MAC_RGMII_MODE_TX_RESET;
1470 tw32(MAC_EXT_RGMII_MODE, val);
1473 static void tg3_mdio_start(struct tg3 *tp)
1475 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1476 tw32_f(MAC_MI_MODE, tp->mi_mode);
1479 if (tg3_flag(tp, MDIOBUS_INITED) &&
1480 tg3_asic_rev(tp) == ASIC_REV_5785)
1481 tg3_mdio_config_5785(tp);
1484 static int tg3_mdio_init(struct tg3 *tp)
1488 struct phy_device *phydev;
1490 if (tg3_flag(tp, 5717_PLUS)) {
1493 tp->phy_addr = tp->pci_fn + 1;
1495 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1496 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1498 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1499 TG3_CPMU_PHY_STRAP_IS_SERDES;
1503 tp->phy_addr = TG3_PHY_MII_ADDR;
1507 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1510 tp->mdio_bus = mdiobus_alloc();
1511 if (tp->mdio_bus == NULL)
1514 tp->mdio_bus->name = "tg3 mdio bus";
1515 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1516 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1517 tp->mdio_bus->priv = tp;
1518 tp->mdio_bus->parent = &tp->pdev->dev;
1519 tp->mdio_bus->read = &tg3_mdio_read;
1520 tp->mdio_bus->write = &tg3_mdio_write;
1521 tp->mdio_bus->reset = &tg3_mdio_reset;
1522 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1523 tp->mdio_bus->irq = &tp->mdio_irq[0];
1525 for (i = 0; i < PHY_MAX_ADDR; i++)
1526 tp->mdio_bus->irq[i] = PHY_POLL;
1528 /* The bus registration will look for all the PHYs on the mdio bus.
1529 * Unfortunately, it does not ensure the PHY is powered up before
1530 * accessing the PHY ID registers. A chip reset is the
1531 * quickest way to bring the device back to an operational state..
1533 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1536 i = mdiobus_register(tp->mdio_bus);
1538 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1539 mdiobus_free(tp->mdio_bus);
1543 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1545 if (!phydev || !phydev->drv) {
1546 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1547 mdiobus_unregister(tp->mdio_bus);
1548 mdiobus_free(tp->mdio_bus);
1552 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1553 case PHY_ID_BCM57780:
1554 phydev->interface = PHY_INTERFACE_MODE_GMII;
1555 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1557 case PHY_ID_BCM50610:
1558 case PHY_ID_BCM50610M:
1559 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1560 PHY_BRCM_RX_REFCLK_UNUSED |
1561 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1562 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1563 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1564 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1565 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1566 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1567 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1568 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1570 case PHY_ID_RTL8211C:
1571 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1573 case PHY_ID_RTL8201E:
1574 case PHY_ID_BCMAC131:
1575 phydev->interface = PHY_INTERFACE_MODE_MII;
1576 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1577 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1581 tg3_flag_set(tp, MDIOBUS_INITED);
1583 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1584 tg3_mdio_config_5785(tp);
1589 static void tg3_mdio_fini(struct tg3 *tp)
1591 if (tg3_flag(tp, MDIOBUS_INITED)) {
1592 tg3_flag_clear(tp, MDIOBUS_INITED);
1593 mdiobus_unregister(tp->mdio_bus);
1594 mdiobus_free(tp->mdio_bus);
1598 /* tp->lock is held. */
1599 static inline void tg3_generate_fw_event(struct tg3 *tp)
1603 val = tr32(GRC_RX_CPU_EVENT);
1604 val |= GRC_RX_CPU_DRIVER_EVENT;
1605 tw32_f(GRC_RX_CPU_EVENT, val);
1607 tp->last_event_jiffies = jiffies;
1610 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1612 /* tp->lock is held. */
1613 static void tg3_wait_for_event_ack(struct tg3 *tp)
1616 unsigned int delay_cnt;
1619 /* If enough time has passed, no wait is necessary. */
1620 time_remain = (long)(tp->last_event_jiffies + 1 +
1621 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1623 if (time_remain < 0)
1626 /* Check if we can shorten the wait time. */
1627 delay_cnt = jiffies_to_usecs(time_remain);
1628 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1629 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1630 delay_cnt = (delay_cnt >> 3) + 1;
1632 for (i = 0; i < delay_cnt; i++) {
1633 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1639 /* tp->lock is held. */
1640 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1645 if (!tg3_readphy(tp, MII_BMCR, ®))
1647 if (!tg3_readphy(tp, MII_BMSR, ®))
1648 val |= (reg & 0xffff);
1652 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1654 if (!tg3_readphy(tp, MII_LPA, ®))
1655 val |= (reg & 0xffff);
1659 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1660 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1662 if (!tg3_readphy(tp, MII_STAT1000, ®))
1663 val |= (reg & 0xffff);
1667 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1674 /* tp->lock is held. */
1675 static void tg3_ump_link_report(struct tg3 *tp)
1679 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1682 tg3_phy_gather_ump_data(tp, data);
1684 tg3_wait_for_event_ack(tp);
1686 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1687 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1688 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1689 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1690 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1691 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1693 tg3_generate_fw_event(tp);
1696 /* tp->lock is held. */
1697 static void tg3_stop_fw(struct tg3 *tp)
1699 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1700 /* Wait for RX cpu to ACK the previous event. */
1701 tg3_wait_for_event_ack(tp);
1703 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1705 tg3_generate_fw_event(tp);
1707 /* Wait for RX cpu to ACK this event. */
1708 tg3_wait_for_event_ack(tp);
1712 /* tp->lock is held. */
1713 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1715 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1716 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1718 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1720 case RESET_KIND_INIT:
1721 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1725 case RESET_KIND_SHUTDOWN:
1726 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1730 case RESET_KIND_SUSPEND:
1731 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1741 /* tp->lock is held. */
1742 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1744 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1746 case RESET_KIND_INIT:
1747 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1748 DRV_STATE_START_DONE);
1751 case RESET_KIND_SHUTDOWN:
1752 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1753 DRV_STATE_UNLOAD_DONE);
1762 /* tp->lock is held. */
1763 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1765 if (tg3_flag(tp, ENABLE_ASF)) {
1767 case RESET_KIND_INIT:
1768 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1772 case RESET_KIND_SHUTDOWN:
1773 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1777 case RESET_KIND_SUSPEND:
1778 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1788 static int tg3_poll_fw(struct tg3 *tp)
1793 if (tg3_flag(tp, IS_SSB_CORE)) {
1794 /* We don't use firmware. */
1798 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1799 /* Wait up to 20ms for init done. */
1800 for (i = 0; i < 200; i++) {
1801 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1808 /* Wait for firmware initialization to complete. */
1809 for (i = 0; i < 100000; i++) {
1810 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1811 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1816 /* Chip might not be fitted with firmware. Some Sun onboard
1817 * parts are configured like that. So don't signal the timeout
1818 * of the above loop as an error, but do report the lack of
1819 * running firmware once.
1821 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1822 tg3_flag_set(tp, NO_FWARE_REPORTED);
1824 netdev_info(tp->dev, "No firmware running\n");
1827 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1828 /* The 57765 A0 needs a little more
1829 * time to do some important work.
1837 static void tg3_link_report(struct tg3 *tp)
1839 if (!netif_carrier_ok(tp->dev)) {
1840 netif_info(tp, link, tp->dev, "Link is down\n");
1841 tg3_ump_link_report(tp);
1842 } else if (netif_msg_link(tp)) {
1843 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1844 (tp->link_config.active_speed == SPEED_1000 ?
1846 (tp->link_config.active_speed == SPEED_100 ?
1848 (tp->link_config.active_duplex == DUPLEX_FULL ?
1851 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1852 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1854 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1857 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1858 netdev_info(tp->dev, "EEE is %s\n",
1859 tp->setlpicnt ? "enabled" : "disabled");
1861 tg3_ump_link_report(tp);
1864 tp->link_up = netif_carrier_ok(tp->dev);
1867 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1871 if (adv & ADVERTISE_PAUSE_CAP) {
1872 flowctrl |= FLOW_CTRL_RX;
1873 if (!(adv & ADVERTISE_PAUSE_ASYM))
1874 flowctrl |= FLOW_CTRL_TX;
1875 } else if (adv & ADVERTISE_PAUSE_ASYM)
1876 flowctrl |= FLOW_CTRL_TX;
1881 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1885 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1886 miireg = ADVERTISE_1000XPAUSE;
1887 else if (flow_ctrl & FLOW_CTRL_TX)
1888 miireg = ADVERTISE_1000XPSE_ASYM;
1889 else if (flow_ctrl & FLOW_CTRL_RX)
1890 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1897 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1901 if (adv & ADVERTISE_1000XPAUSE) {
1902 flowctrl |= FLOW_CTRL_RX;
1903 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1904 flowctrl |= FLOW_CTRL_TX;
1905 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1906 flowctrl |= FLOW_CTRL_TX;
1911 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1915 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1916 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1917 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1918 if (lcladv & ADVERTISE_1000XPAUSE)
1920 if (rmtadv & ADVERTISE_1000XPAUSE)
1927 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1931 u32 old_rx_mode = tp->rx_mode;
1932 u32 old_tx_mode = tp->tx_mode;
1934 if (tg3_flag(tp, USE_PHYLIB))
1935 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1937 autoneg = tp->link_config.autoneg;
1939 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1940 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1941 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1943 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1945 flowctrl = tp->link_config.flowctrl;
1947 tp->link_config.active_flowctrl = flowctrl;
1949 if (flowctrl & FLOW_CTRL_RX)
1950 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1952 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1954 if (old_rx_mode != tp->rx_mode)
1955 tw32_f(MAC_RX_MODE, tp->rx_mode);
1957 if (flowctrl & FLOW_CTRL_TX)
1958 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1960 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1962 if (old_tx_mode != tp->tx_mode)
1963 tw32_f(MAC_TX_MODE, tp->tx_mode);
1966 static void tg3_adjust_link(struct net_device *dev)
1968 u8 oldflowctrl, linkmesg = 0;
1969 u32 mac_mode, lcl_adv, rmt_adv;
1970 struct tg3 *tp = netdev_priv(dev);
1971 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1973 spin_lock_bh(&tp->lock);
1975 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1976 MAC_MODE_HALF_DUPLEX);
1978 oldflowctrl = tp->link_config.active_flowctrl;
1984 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1985 mac_mode |= MAC_MODE_PORT_MODE_MII;
1986 else if (phydev->speed == SPEED_1000 ||
1987 tg3_asic_rev(tp) != ASIC_REV_5785)
1988 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1990 mac_mode |= MAC_MODE_PORT_MODE_MII;
1992 if (phydev->duplex == DUPLEX_HALF)
1993 mac_mode |= MAC_MODE_HALF_DUPLEX;
1995 lcl_adv = mii_advertise_flowctrl(
1996 tp->link_config.flowctrl);
1999 rmt_adv = LPA_PAUSE_CAP;
2000 if (phydev->asym_pause)
2001 rmt_adv |= LPA_PAUSE_ASYM;
2004 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2006 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2008 if (mac_mode != tp->mac_mode) {
2009 tp->mac_mode = mac_mode;
2010 tw32_f(MAC_MODE, tp->mac_mode);
2014 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2015 if (phydev->speed == SPEED_10)
2017 MAC_MI_STAT_10MBPS_MODE |
2018 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2020 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2023 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2024 tw32(MAC_TX_LENGTHS,
2025 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2026 (6 << TX_LENGTHS_IPG_SHIFT) |
2027 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2029 tw32(MAC_TX_LENGTHS,
2030 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2031 (6 << TX_LENGTHS_IPG_SHIFT) |
2032 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2034 if (phydev->link != tp->old_link ||
2035 phydev->speed != tp->link_config.active_speed ||
2036 phydev->duplex != tp->link_config.active_duplex ||
2037 oldflowctrl != tp->link_config.active_flowctrl)
2040 tp->old_link = phydev->link;
2041 tp->link_config.active_speed = phydev->speed;
2042 tp->link_config.active_duplex = phydev->duplex;
2044 spin_unlock_bh(&tp->lock);
2047 tg3_link_report(tp);
2050 static int tg3_phy_init(struct tg3 *tp)
2052 struct phy_device *phydev;
2054 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2057 /* Bring the PHY back to a known state. */
2060 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2062 /* Attach the MAC to the PHY. */
2063 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2064 tg3_adjust_link, phydev->interface);
2065 if (IS_ERR(phydev)) {
2066 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2067 return PTR_ERR(phydev);
2070 /* Mask with MAC supported features. */
2071 switch (phydev->interface) {
2072 case PHY_INTERFACE_MODE_GMII:
2073 case PHY_INTERFACE_MODE_RGMII:
2074 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2075 phydev->supported &= (PHY_GBIT_FEATURES |
2077 SUPPORTED_Asym_Pause);
2081 case PHY_INTERFACE_MODE_MII:
2082 phydev->supported &= (PHY_BASIC_FEATURES |
2084 SUPPORTED_Asym_Pause);
2087 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2091 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2093 phydev->advertising = phydev->supported;
2098 static void tg3_phy_start(struct tg3 *tp)
2100 struct phy_device *phydev;
2102 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2105 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2107 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2108 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2109 phydev->speed = tp->link_config.speed;
2110 phydev->duplex = tp->link_config.duplex;
2111 phydev->autoneg = tp->link_config.autoneg;
2112 phydev->advertising = tp->link_config.advertising;
2117 phy_start_aneg(phydev);
2120 static void tg3_phy_stop(struct tg3 *tp)
2122 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2125 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2128 static void tg3_phy_fini(struct tg3 *tp)
2130 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2131 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2132 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2136 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2141 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2144 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2145 /* Cannot do read-modify-write on 5401 */
2146 err = tg3_phy_auxctl_write(tp,
2147 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2148 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2153 err = tg3_phy_auxctl_read(tp,
2154 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2158 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2159 err = tg3_phy_auxctl_write(tp,
2160 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2166 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2170 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2173 tg3_writephy(tp, MII_TG3_FET_TEST,
2174 phytest | MII_TG3_FET_SHADOW_EN);
2175 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2177 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2179 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2180 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2182 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2186 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2190 if (!tg3_flag(tp, 5705_PLUS) ||
2191 (tg3_flag(tp, 5717_PLUS) &&
2192 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2195 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2196 tg3_phy_fet_toggle_apd(tp, enable);
2200 reg = MII_TG3_MISC_SHDW_WREN |
2201 MII_TG3_MISC_SHDW_SCR5_SEL |
2202 MII_TG3_MISC_SHDW_SCR5_LPED |
2203 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2204 MII_TG3_MISC_SHDW_SCR5_SDTL |
2205 MII_TG3_MISC_SHDW_SCR5_C125OE;
2206 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2207 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2209 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2212 reg = MII_TG3_MISC_SHDW_WREN |
2213 MII_TG3_MISC_SHDW_APD_SEL |
2214 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2216 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2218 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2221 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2225 if (!tg3_flag(tp, 5705_PLUS) ||
2226 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2229 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2232 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2233 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2235 tg3_writephy(tp, MII_TG3_FET_TEST,
2236 ephy | MII_TG3_FET_SHADOW_EN);
2237 if (!tg3_readphy(tp, reg, &phy)) {
2239 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2241 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2242 tg3_writephy(tp, reg, phy);
2244 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2249 ret = tg3_phy_auxctl_read(tp,
2250 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2253 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2255 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2256 tg3_phy_auxctl_write(tp,
2257 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2262 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2267 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2270 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2272 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2273 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2276 static void tg3_phy_apply_otp(struct tg3 *tp)
2285 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2288 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2289 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2290 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2292 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2293 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2294 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2296 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2297 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2298 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2300 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2301 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2303 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2304 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2306 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2307 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2308 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2310 tg3_phy_toggle_auxctl_smdsp(tp, false);
2313 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2316 struct ethtool_eee *dest = &tp->eee;
2318 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2324 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2327 /* Pull eee_active */
2328 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2329 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2330 dest->eee_active = 1;
2332 dest->eee_active = 0;
2334 /* Pull lp advertised settings */
2335 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2337 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2339 /* Pull advertised and eee_enabled settings */
2340 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2342 dest->eee_enabled = !!val;
2343 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2345 /* Pull tx_lpi_enabled */
2346 val = tr32(TG3_CPMU_EEE_MODE);
2347 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2349 /* Pull lpi timer value */
2350 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2353 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2357 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2362 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2364 tp->link_config.active_duplex == DUPLEX_FULL &&
2365 (tp->link_config.active_speed == SPEED_100 ||
2366 tp->link_config.active_speed == SPEED_1000)) {
2369 if (tp->link_config.active_speed == SPEED_1000)
2370 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2372 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2374 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2376 tg3_eee_pull_config(tp, NULL);
2377 if (tp->eee.eee_active)
2381 if (!tp->setlpicnt) {
2382 if (current_link_up &&
2383 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2384 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2385 tg3_phy_toggle_auxctl_smdsp(tp, false);
2388 val = tr32(TG3_CPMU_EEE_MODE);
2389 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2393 static void tg3_phy_eee_enable(struct tg3 *tp)
2397 if (tp->link_config.active_speed == SPEED_1000 &&
2398 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2399 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2400 tg3_flag(tp, 57765_CLASS)) &&
2401 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2402 val = MII_TG3_DSP_TAP26_ALNOKO |
2403 MII_TG3_DSP_TAP26_RMRXSTO;
2404 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2405 tg3_phy_toggle_auxctl_smdsp(tp, false);
2408 val = tr32(TG3_CPMU_EEE_MODE);
2409 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2412 static int tg3_wait_macro_done(struct tg3 *tp)
2419 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2420 if ((tmp32 & 0x1000) == 0)
2430 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2432 static const u32 test_pat[4][6] = {
2433 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2434 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2435 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2436 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2440 for (chan = 0; chan < 4; chan++) {
2443 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2444 (chan * 0x2000) | 0x0200);
2445 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2447 for (i = 0; i < 6; i++)
2448 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2451 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2452 if (tg3_wait_macro_done(tp)) {
2457 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2458 (chan * 0x2000) | 0x0200);
2459 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2460 if (tg3_wait_macro_done(tp)) {
2465 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2466 if (tg3_wait_macro_done(tp)) {
2471 for (i = 0; i < 6; i += 2) {
2474 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2475 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2476 tg3_wait_macro_done(tp)) {
2482 if (low != test_pat[chan][i] ||
2483 high != test_pat[chan][i+1]) {
2484 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2485 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2486 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2496 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2500 for (chan = 0; chan < 4; chan++) {
2503 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2504 (chan * 0x2000) | 0x0200);
2505 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2506 for (i = 0; i < 6; i++)
2507 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2508 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2509 if (tg3_wait_macro_done(tp))
2516 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2518 u32 reg32, phy9_orig;
2519 int retries, do_phy_reset, err;
2525 err = tg3_bmcr_reset(tp);
2531 /* Disable transmitter and interrupt. */
2532 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
2536 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2538 /* Set full-duplex, 1000 mbps. */
2539 tg3_writephy(tp, MII_BMCR,
2540 BMCR_FULLDPLX | BMCR_SPEED1000);
2542 /* Set to master mode. */
2543 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2546 tg3_writephy(tp, MII_CTRL1000,
2547 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2549 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2553 /* Block the PHY control access. */
2554 tg3_phydsp_write(tp, 0x8005, 0x0800);
2556 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2559 } while (--retries);
2561 err = tg3_phy_reset_chanpat(tp);
2565 tg3_phydsp_write(tp, 0x8005, 0x0000);
2567 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2568 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2570 tg3_phy_toggle_auxctl_smdsp(tp, false);
2572 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2574 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
2576 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2583 static void tg3_carrier_off(struct tg3 *tp)
2585 netif_carrier_off(tp->dev);
2586 tp->link_up = false;
2589 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2591 if (tg3_flag(tp, ENABLE_ASF))
2592 netdev_warn(tp->dev,
2593 "Management side-band traffic will be interrupted during phy settings change\n");
2596 /* This will reset the tigon3 PHY if there is no valid
2597 * link unless the FORCE argument is non-zero.
2599 static int tg3_phy_reset(struct tg3 *tp)
2604 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2605 val = tr32(GRC_MISC_CFG);
2606 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2609 err = tg3_readphy(tp, MII_BMSR, &val);
2610 err |= tg3_readphy(tp, MII_BMSR, &val);
2614 if (netif_running(tp->dev) && tp->link_up) {
2615 netif_carrier_off(tp->dev);
2616 tg3_link_report(tp);
2619 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2620 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2621 tg3_asic_rev(tp) == ASIC_REV_5705) {
2622 err = tg3_phy_reset_5703_4_5(tp);
2629 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2630 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2631 cpmuctrl = tr32(TG3_CPMU_CTRL);
2632 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2634 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2637 err = tg3_bmcr_reset(tp);
2641 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2642 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2643 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2645 tw32(TG3_CPMU_CTRL, cpmuctrl);
2648 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2649 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2650 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2651 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2652 CPMU_LSPD_1000MB_MACCLK_12_5) {
2653 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2655 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2659 if (tg3_flag(tp, 5717_PLUS) &&
2660 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2663 tg3_phy_apply_otp(tp);
2665 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2666 tg3_phy_toggle_apd(tp, true);
2668 tg3_phy_toggle_apd(tp, false);
2671 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2672 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2673 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2674 tg3_phydsp_write(tp, 0x000a, 0x0323);
2675 tg3_phy_toggle_auxctl_smdsp(tp, false);
2678 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2679 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2680 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2683 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2684 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2685 tg3_phydsp_write(tp, 0x000a, 0x310b);
2686 tg3_phydsp_write(tp, 0x201f, 0x9506);
2687 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2688 tg3_phy_toggle_auxctl_smdsp(tp, false);
2690 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2691 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2692 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2693 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2694 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2695 tg3_writephy(tp, MII_TG3_TEST1,
2696 MII_TG3_TEST1_TRIM_EN | 0x4);
2698 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2700 tg3_phy_toggle_auxctl_smdsp(tp, false);
2704 /* Set Extended packet length bit (bit 14) on all chips that */
2705 /* support jumbo frames */
2706 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2707 /* Cannot do read-modify-write on 5401 */
2708 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2709 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2710 /* Set bit 14 with read-modify-write to preserve other bits */
2711 err = tg3_phy_auxctl_read(tp,
2712 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2714 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2715 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2718 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2719 * jumbo frames transmission.
2721 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2722 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2723 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2724 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2727 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2728 /* adjust output voltage */
2729 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2732 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2733 tg3_phydsp_write(tp, 0xffb, 0x4000);
2735 tg3_phy_toggle_automdix(tp, true);
2736 tg3_phy_set_wirespeed(tp);
2740 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2741 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2742 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2743 TG3_GPIO_MSG_NEED_VAUX)
2744 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2745 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2746 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2747 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2748 (TG3_GPIO_MSG_DRVR_PRES << 12))
2750 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2751 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2752 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2753 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2754 (TG3_GPIO_MSG_NEED_VAUX << 12))
2756 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2760 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2761 tg3_asic_rev(tp) == ASIC_REV_5719)
2762 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2764 status = tr32(TG3_CPMU_DRV_STATUS);
2766 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2767 status &= ~(TG3_GPIO_MSG_MASK << shift);
2768 status |= (newstat << shift);
2770 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2771 tg3_asic_rev(tp) == ASIC_REV_5719)
2772 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2774 tw32(TG3_CPMU_DRV_STATUS, status);
2776 return status >> TG3_APE_GPIO_MSG_SHIFT;
2779 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2781 if (!tg3_flag(tp, IS_NIC))
2784 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2785 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2786 tg3_asic_rev(tp) == ASIC_REV_5720) {
2787 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2790 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2792 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2793 TG3_GRC_LCLCTL_PWRSW_DELAY);
2795 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2797 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2798 TG3_GRC_LCLCTL_PWRSW_DELAY);
2804 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2808 if (!tg3_flag(tp, IS_NIC) ||
2809 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2810 tg3_asic_rev(tp) == ASIC_REV_5701)
2813 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2815 tw32_wait_f(GRC_LOCAL_CTRL,
2816 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2817 TG3_GRC_LCLCTL_PWRSW_DELAY);
2819 tw32_wait_f(GRC_LOCAL_CTRL,
2821 TG3_GRC_LCLCTL_PWRSW_DELAY);
2823 tw32_wait_f(GRC_LOCAL_CTRL,
2824 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2825 TG3_GRC_LCLCTL_PWRSW_DELAY);
2828 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2830 if (!tg3_flag(tp, IS_NIC))
2833 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2834 tg3_asic_rev(tp) == ASIC_REV_5701) {
2835 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2836 (GRC_LCLCTRL_GPIO_OE0 |
2837 GRC_LCLCTRL_GPIO_OE1 |
2838 GRC_LCLCTRL_GPIO_OE2 |
2839 GRC_LCLCTRL_GPIO_OUTPUT0 |
2840 GRC_LCLCTRL_GPIO_OUTPUT1),
2841 TG3_GRC_LCLCTL_PWRSW_DELAY);
2842 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2843 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2844 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2845 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2846 GRC_LCLCTRL_GPIO_OE1 |
2847 GRC_LCLCTRL_GPIO_OE2 |
2848 GRC_LCLCTRL_GPIO_OUTPUT0 |
2849 GRC_LCLCTRL_GPIO_OUTPUT1 |
2851 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2852 TG3_GRC_LCLCTL_PWRSW_DELAY);
2854 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2855 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2856 TG3_GRC_LCLCTL_PWRSW_DELAY);
2858 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2859 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2860 TG3_GRC_LCLCTL_PWRSW_DELAY);
2863 u32 grc_local_ctrl = 0;
2865 /* Workaround to prevent overdrawing Amps. */
2866 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2867 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2868 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2870 TG3_GRC_LCLCTL_PWRSW_DELAY);
2873 /* On 5753 and variants, GPIO2 cannot be used. */
2874 no_gpio2 = tp->nic_sram_data_cfg &
2875 NIC_SRAM_DATA_CFG_NO_GPIO2;
2877 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2878 GRC_LCLCTRL_GPIO_OE1 |
2879 GRC_LCLCTRL_GPIO_OE2 |
2880 GRC_LCLCTRL_GPIO_OUTPUT1 |
2881 GRC_LCLCTRL_GPIO_OUTPUT2;
2883 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2884 GRC_LCLCTRL_GPIO_OUTPUT2);
2886 tw32_wait_f(GRC_LOCAL_CTRL,
2887 tp->grc_local_ctrl | grc_local_ctrl,
2888 TG3_GRC_LCLCTL_PWRSW_DELAY);
2890 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2892 tw32_wait_f(GRC_LOCAL_CTRL,
2893 tp->grc_local_ctrl | grc_local_ctrl,
2894 TG3_GRC_LCLCTL_PWRSW_DELAY);
2897 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2898 tw32_wait_f(GRC_LOCAL_CTRL,
2899 tp->grc_local_ctrl | grc_local_ctrl,
2900 TG3_GRC_LCLCTL_PWRSW_DELAY);
2905 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2909 /* Serialize power state transitions */
2910 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2913 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2914 msg = TG3_GPIO_MSG_NEED_VAUX;
2916 msg = tg3_set_function_status(tp, msg);
2918 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2921 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2922 tg3_pwrsrc_switch_to_vaux(tp);
2924 tg3_pwrsrc_die_with_vmain(tp);
2927 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2930 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2932 bool need_vaux = false;
2934 /* The GPIOs do something completely different on 57765. */
2935 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2938 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2939 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2940 tg3_asic_rev(tp) == ASIC_REV_5720) {
2941 tg3_frob_aux_power_5717(tp, include_wol ?
2942 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2946 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2947 struct net_device *dev_peer;
2949 dev_peer = pci_get_drvdata(tp->pdev_peer);
2951 /* remove_one() may have been run on the peer. */
2953 struct tg3 *tp_peer = netdev_priv(dev_peer);
2955 if (tg3_flag(tp_peer, INIT_COMPLETE))
2958 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2959 tg3_flag(tp_peer, ENABLE_ASF))
2964 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2965 tg3_flag(tp, ENABLE_ASF))
2969 tg3_pwrsrc_switch_to_vaux(tp);
2971 tg3_pwrsrc_die_with_vmain(tp);
2974 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2976 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2978 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2979 if (speed != SPEED_10)
2981 } else if (speed == SPEED_10)
2987 static bool tg3_phy_power_bug(struct tg3 *tp)
2989 switch (tg3_asic_rev(tp)) {
2994 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3003 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3012 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3016 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3019 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3020 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3021 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3022 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3025 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3026 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3027 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3032 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3034 val = tr32(GRC_MISC_CFG);
3035 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3038 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3040 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3043 tg3_writephy(tp, MII_ADVERTISE, 0);
3044 tg3_writephy(tp, MII_BMCR,
3045 BMCR_ANENABLE | BMCR_ANRESTART);
3047 tg3_writephy(tp, MII_TG3_FET_TEST,
3048 phytest | MII_TG3_FET_SHADOW_EN);
3049 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3050 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3052 MII_TG3_FET_SHDW_AUXMODE4,
3055 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3058 } else if (do_low_power) {
3059 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3060 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3062 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3063 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3064 MII_TG3_AUXCTL_PCTL_VREG_11V;
3065 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3068 /* The PHY should not be powered down on some chips because
3071 if (tg3_phy_power_bug(tp))
3074 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3075 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3076 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3077 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3078 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3079 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3082 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3085 /* tp->lock is held. */
3086 static int tg3_nvram_lock(struct tg3 *tp)
3088 if (tg3_flag(tp, NVRAM)) {
3091 if (tp->nvram_lock_cnt == 0) {
3092 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3093 for (i = 0; i < 8000; i++) {
3094 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3099 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3103 tp->nvram_lock_cnt++;
3108 /* tp->lock is held. */
3109 static void tg3_nvram_unlock(struct tg3 *tp)
3111 if (tg3_flag(tp, NVRAM)) {
3112 if (tp->nvram_lock_cnt > 0)
3113 tp->nvram_lock_cnt--;
3114 if (tp->nvram_lock_cnt == 0)
3115 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3119 /* tp->lock is held. */
3120 static void tg3_enable_nvram_access(struct tg3 *tp)
3122 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3123 u32 nvaccess = tr32(NVRAM_ACCESS);
3125 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3129 /* tp->lock is held. */
3130 static void tg3_disable_nvram_access(struct tg3 *tp)
3132 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3133 u32 nvaccess = tr32(NVRAM_ACCESS);
3135 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3139 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3140 u32 offset, u32 *val)
3145 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3148 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3149 EEPROM_ADDR_DEVID_MASK |
3151 tw32(GRC_EEPROM_ADDR,
3153 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3154 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3155 EEPROM_ADDR_ADDR_MASK) |
3156 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3158 for (i = 0; i < 1000; i++) {
3159 tmp = tr32(GRC_EEPROM_ADDR);
3161 if (tmp & EEPROM_ADDR_COMPLETE)
3165 if (!(tmp & EEPROM_ADDR_COMPLETE))
3168 tmp = tr32(GRC_EEPROM_DATA);
3171 * The data will always be opposite the native endian
3172 * format. Perform a blind byteswap to compensate.
3179 #define NVRAM_CMD_TIMEOUT 10000
3181 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3185 tw32(NVRAM_CMD, nvram_cmd);
3186 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3188 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3194 if (i == NVRAM_CMD_TIMEOUT)
3200 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3202 if (tg3_flag(tp, NVRAM) &&
3203 tg3_flag(tp, NVRAM_BUFFERED) &&
3204 tg3_flag(tp, FLASH) &&
3205 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3206 (tp->nvram_jedecnum == JEDEC_ATMEL))
3208 addr = ((addr / tp->nvram_pagesize) <<
3209 ATMEL_AT45DB0X1B_PAGE_POS) +
3210 (addr % tp->nvram_pagesize);
3215 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3217 if (tg3_flag(tp, NVRAM) &&
3218 tg3_flag(tp, NVRAM_BUFFERED) &&
3219 tg3_flag(tp, FLASH) &&
3220 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3221 (tp->nvram_jedecnum == JEDEC_ATMEL))
3223 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3224 tp->nvram_pagesize) +
3225 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3230 /* NOTE: Data read in from NVRAM is byteswapped according to
3231 * the byteswapping settings for all other register accesses.
3232 * tg3 devices are BE devices, so on a BE machine, the data
3233 * returned will be exactly as it is seen in NVRAM. On a LE
3234 * machine, the 32-bit value will be byteswapped.
3236 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3240 if (!tg3_flag(tp, NVRAM))
3241 return tg3_nvram_read_using_eeprom(tp, offset, val);
3243 offset = tg3_nvram_phys_addr(tp, offset);
3245 if (offset > NVRAM_ADDR_MSK)
3248 ret = tg3_nvram_lock(tp);
3252 tg3_enable_nvram_access(tp);
3254 tw32(NVRAM_ADDR, offset);
3255 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3256 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3259 *val = tr32(NVRAM_RDDATA);
3261 tg3_disable_nvram_access(tp);
3263 tg3_nvram_unlock(tp);
3268 /* Ensures NVRAM data is in bytestream format. */
3269 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3272 int res = tg3_nvram_read(tp, offset, &v);
3274 *val = cpu_to_be32(v);
3278 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3279 u32 offset, u32 len, u8 *buf)
3284 for (i = 0; i < len; i += 4) {
3290 memcpy(&data, buf + i, 4);
3293 * The SEEPROM interface expects the data to always be opposite
3294 * the native endian format. We accomplish this by reversing
3295 * all the operations that would have been performed on the
3296 * data from a call to tg3_nvram_read_be32().
3298 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3300 val = tr32(GRC_EEPROM_ADDR);
3301 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3303 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3305 tw32(GRC_EEPROM_ADDR, val |
3306 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3307 (addr & EEPROM_ADDR_ADDR_MASK) |
3311 for (j = 0; j < 1000; j++) {
3312 val = tr32(GRC_EEPROM_ADDR);
3314 if (val & EEPROM_ADDR_COMPLETE)
3318 if (!(val & EEPROM_ADDR_COMPLETE)) {
3327 /* offset and length are dword aligned */
3328 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3332 u32 pagesize = tp->nvram_pagesize;
3333 u32 pagemask = pagesize - 1;
3337 tmp = kmalloc(pagesize, GFP_KERNEL);
3343 u32 phy_addr, page_off, size;
3345 phy_addr = offset & ~pagemask;
3347 for (j = 0; j < pagesize; j += 4) {
3348 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3349 (__be32 *) (tmp + j));
3356 page_off = offset & pagemask;
3363 memcpy(tmp + page_off, buf, size);
3365 offset = offset + (pagesize - page_off);
3367 tg3_enable_nvram_access(tp);
3370 * Before we can erase the flash page, we need
3371 * to issue a special "write enable" command.
3373 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3375 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3378 /* Erase the target page */
3379 tw32(NVRAM_ADDR, phy_addr);
3381 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3382 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3384 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3387 /* Issue another write enable to start the write. */
3388 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3390 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3393 for (j = 0; j < pagesize; j += 4) {
3396 data = *((__be32 *) (tmp + j));
3398 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3400 tw32(NVRAM_ADDR, phy_addr + j);
3402 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3406 nvram_cmd |= NVRAM_CMD_FIRST;
3407 else if (j == (pagesize - 4))
3408 nvram_cmd |= NVRAM_CMD_LAST;
3410 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3418 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3419 tg3_nvram_exec_cmd(tp, nvram_cmd);
3426 /* offset and length are dword aligned */
3427 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3432 for (i = 0; i < len; i += 4, offset += 4) {
3433 u32 page_off, phy_addr, nvram_cmd;
3436 memcpy(&data, buf + i, 4);
3437 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3439 page_off = offset % tp->nvram_pagesize;
3441 phy_addr = tg3_nvram_phys_addr(tp, offset);
3443 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3445 if (page_off == 0 || i == 0)
3446 nvram_cmd |= NVRAM_CMD_FIRST;
3447 if (page_off == (tp->nvram_pagesize - 4))
3448 nvram_cmd |= NVRAM_CMD_LAST;
3451 nvram_cmd |= NVRAM_CMD_LAST;
3453 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3454 !tg3_flag(tp, FLASH) ||
3455 !tg3_flag(tp, 57765_PLUS))
3456 tw32(NVRAM_ADDR, phy_addr);
3458 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3459 !tg3_flag(tp, 5755_PLUS) &&
3460 (tp->nvram_jedecnum == JEDEC_ST) &&
3461 (nvram_cmd & NVRAM_CMD_FIRST)) {
3464 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3465 ret = tg3_nvram_exec_cmd(tp, cmd);
3469 if (!tg3_flag(tp, FLASH)) {
3470 /* We always do complete word writes to eeprom. */
3471 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3474 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3481 /* offset and length are dword aligned */
3482 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3486 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3487 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3488 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3492 if (!tg3_flag(tp, NVRAM)) {
3493 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3497 ret = tg3_nvram_lock(tp);
3501 tg3_enable_nvram_access(tp);
3502 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3503 tw32(NVRAM_WRITE1, 0x406);
3505 grc_mode = tr32(GRC_MODE);
3506 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3508 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3509 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3512 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3516 grc_mode = tr32(GRC_MODE);
3517 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3519 tg3_disable_nvram_access(tp);
3520 tg3_nvram_unlock(tp);
3523 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3524 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3531 #define RX_CPU_SCRATCH_BASE 0x30000
3532 #define RX_CPU_SCRATCH_SIZE 0x04000
3533 #define TX_CPU_SCRATCH_BASE 0x34000
3534 #define TX_CPU_SCRATCH_SIZE 0x04000
3536 /* tp->lock is held. */
3537 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3540 const int iters = 10000;
3542 for (i = 0; i < iters; i++) {
3543 tw32(cpu_base + CPU_STATE, 0xffffffff);
3544 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3545 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3549 return (i == iters) ? -EBUSY : 0;
3552 /* tp->lock is held. */
3553 static int tg3_rxcpu_pause(struct tg3 *tp)
3555 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3557 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3558 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3564 /* tp->lock is held. */
3565 static int tg3_txcpu_pause(struct tg3 *tp)
3567 return tg3_pause_cpu(tp, TX_CPU_BASE);
3570 /* tp->lock is held. */
3571 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3573 tw32(cpu_base + CPU_STATE, 0xffffffff);
3574 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3577 /* tp->lock is held. */
3578 static void tg3_rxcpu_resume(struct tg3 *tp)
3580 tg3_resume_cpu(tp, RX_CPU_BASE);
3583 /* tp->lock is held. */
3584 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3588 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3590 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3591 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3593 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3596 if (cpu_base == RX_CPU_BASE) {
3597 rc = tg3_rxcpu_pause(tp);
3600 * There is only an Rx CPU for the 5750 derivative in the
3603 if (tg3_flag(tp, IS_SSB_CORE))
3606 rc = tg3_txcpu_pause(tp);
3610 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3611 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3615 /* Clear firmware's nvram arbitration. */
3616 if (tg3_flag(tp, NVRAM))
3617 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3621 static int tg3_fw_data_len(struct tg3 *tp,
3622 const struct tg3_firmware_hdr *fw_hdr)
3626 /* Non fragmented firmware have one firmware header followed by a
3627 * contiguous chunk of data to be written. The length field in that
3628 * header is not the length of data to be written but the complete
3629 * length of the bss. The data length is determined based on
3630 * tp->fw->size minus headers.
3632 * Fragmented firmware have a main header followed by multiple
3633 * fragments. Each fragment is identical to non fragmented firmware
3634 * with a firmware header followed by a contiguous chunk of data. In
3635 * the main header, the length field is unused and set to 0xffffffff.
3636 * In each fragment header the length is the entire size of that
3637 * fragment i.e. fragment data + header length. Data length is
3638 * therefore length field in the header minus TG3_FW_HDR_LEN.
3640 if (tp->fw_len == 0xffffffff)
3641 fw_len = be32_to_cpu(fw_hdr->len);
3643 fw_len = tp->fw->size;
3645 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3648 /* tp->lock is held. */
3649 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3650 u32 cpu_scratch_base, int cpu_scratch_size,
3651 const struct tg3_firmware_hdr *fw_hdr)
3654 void (*write_op)(struct tg3 *, u32, u32);
3655 int total_len = tp->fw->size;
3657 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3659 "%s: Trying to load TX cpu firmware which is 5705\n",
3664 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3665 write_op = tg3_write_mem;
3667 write_op = tg3_write_indirect_reg32;
3669 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3670 /* It is possible that bootcode is still loading at this point.
3671 * Get the nvram lock first before halting the cpu.
3673 int lock_err = tg3_nvram_lock(tp);
3674 err = tg3_halt_cpu(tp, cpu_base);
3676 tg3_nvram_unlock(tp);
3680 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3681 write_op(tp, cpu_scratch_base + i, 0);
3682 tw32(cpu_base + CPU_STATE, 0xffffffff);
3683 tw32(cpu_base + CPU_MODE,
3684 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3686 /* Subtract additional main header for fragmented firmware and
3687 * advance to the first fragment
3689 total_len -= TG3_FW_HDR_LEN;
3694 u32 *fw_data = (u32 *)(fw_hdr + 1);
3695 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3696 write_op(tp, cpu_scratch_base +
3697 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3699 be32_to_cpu(fw_data[i]));
3701 total_len -= be32_to_cpu(fw_hdr->len);
3703 /* Advance to next fragment */
3704 fw_hdr = (struct tg3_firmware_hdr *)
3705 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3706 } while (total_len > 0);
3714 /* tp->lock is held. */
3715 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3718 const int iters = 5;
3720 tw32(cpu_base + CPU_STATE, 0xffffffff);
3721 tw32_f(cpu_base + CPU_PC, pc);
3723 for (i = 0; i < iters; i++) {
3724 if (tr32(cpu_base + CPU_PC) == pc)
3726 tw32(cpu_base + CPU_STATE, 0xffffffff);
3727 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3728 tw32_f(cpu_base + CPU_PC, pc);
3732 return (i == iters) ? -EBUSY : 0;
3735 /* tp->lock is held. */
3736 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3738 const struct tg3_firmware_hdr *fw_hdr;
3741 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3743 /* Firmware blob starts with version numbers, followed by
3744 start address and length. We are setting complete length.
3745 length = end_address_of_bss - start_address_of_text.
3746 Remainder is the blob to be loaded contiguously
3747 from start address. */
3749 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3750 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3755 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3756 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3761 /* Now startup only the RX cpu. */
3762 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3763 be32_to_cpu(fw_hdr->base_addr));
3765 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3766 "should be %08x\n", __func__,
3767 tr32(RX_CPU_BASE + CPU_PC),
3768 be32_to_cpu(fw_hdr->base_addr));
3772 tg3_rxcpu_resume(tp);
3777 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3779 const int iters = 1000;
3783 /* Wait for boot code to complete initialization and enter service
3784 * loop. It is then safe to download service patches
3786 for (i = 0; i < iters; i++) {
3787 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3794 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3798 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3800 netdev_warn(tp->dev,
3801 "Other patches exist. Not downloading EEE patch\n");
3808 /* tp->lock is held. */
3809 static void tg3_load_57766_firmware(struct tg3 *tp)
3811 struct tg3_firmware_hdr *fw_hdr;
3813 if (!tg3_flag(tp, NO_NVRAM))
3816 if (tg3_validate_rxcpu_state(tp))
3822 /* This firmware blob has a different format than older firmware
3823 * releases as given below. The main difference is we have fragmented
3824 * data to be written to non-contiguous locations.
3826 * In the beginning we have a firmware header identical to other
3827 * firmware which consists of version, base addr and length. The length
3828 * here is unused and set to 0xffffffff.
3830 * This is followed by a series of firmware fragments which are
3831 * individually identical to previous firmware. i.e. they have the
3832 * firmware header and followed by data for that fragment. The version
3833 * field of the individual fragment header is unused.
3836 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3837 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3840 if (tg3_rxcpu_pause(tp))
3843 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3844 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3846 tg3_rxcpu_resume(tp);
3849 /* tp->lock is held. */
3850 static int tg3_load_tso_firmware(struct tg3 *tp)
3852 const struct tg3_firmware_hdr *fw_hdr;
3853 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3856 if (!tg3_flag(tp, FW_TSO))
3859 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3861 /* Firmware blob starts with version numbers, followed by
3862 start address and length. We are setting complete length.
3863 length = end_address_of_bss - start_address_of_text.
3864 Remainder is the blob to be loaded contiguously
3865 from start address. */
3867 cpu_scratch_size = tp->fw_len;
3869 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3870 cpu_base = RX_CPU_BASE;
3871 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3873 cpu_base = TX_CPU_BASE;
3874 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3875 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3878 err = tg3_load_firmware_cpu(tp, cpu_base,
3879 cpu_scratch_base, cpu_scratch_size,
3884 /* Now startup the cpu. */
3885 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3886 be32_to_cpu(fw_hdr->base_addr));
3889 "%s fails to set CPU PC, is %08x should be %08x\n",
3890 __func__, tr32(cpu_base + CPU_PC),
3891 be32_to_cpu(fw_hdr->base_addr));
3895 tg3_resume_cpu(tp, cpu_base);
3900 /* tp->lock is held. */
3901 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3903 u32 addr_high, addr_low;
3906 addr_high = ((tp->dev->dev_addr[0] << 8) |
3907 tp->dev->dev_addr[1]);
3908 addr_low = ((tp->dev->dev_addr[2] << 24) |
3909 (tp->dev->dev_addr[3] << 16) |
3910 (tp->dev->dev_addr[4] << 8) |
3911 (tp->dev->dev_addr[5] << 0));
3912 for (i = 0; i < 4; i++) {
3913 if (i == 1 && skip_mac_1)
3915 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3916 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3919 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3920 tg3_asic_rev(tp) == ASIC_REV_5704) {
3921 for (i = 0; i < 12; i++) {
3922 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3923 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3927 addr_high = (tp->dev->dev_addr[0] +
3928 tp->dev->dev_addr[1] +
3929 tp->dev->dev_addr[2] +
3930 tp->dev->dev_addr[3] +
3931 tp->dev->dev_addr[4] +
3932 tp->dev->dev_addr[5]) &
3933 TX_BACKOFF_SEED_MASK;
3934 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3937 static void tg3_enable_register_access(struct tg3 *tp)
3940 * Make sure register accesses (indirect or otherwise) will function
3943 pci_write_config_dword(tp->pdev,
3944 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3947 static int tg3_power_up(struct tg3 *tp)
3951 tg3_enable_register_access(tp);
3953 err = pci_set_power_state(tp->pdev, PCI_D0);
3955 /* Switch out of Vaux if it is a NIC */
3956 tg3_pwrsrc_switch_to_vmain(tp);
3958 netdev_err(tp->dev, "Transition to D0 failed\n");
3964 static int tg3_setup_phy(struct tg3 *, bool);
3966 static int tg3_power_down_prepare(struct tg3 *tp)
3969 bool device_should_wake, do_low_power;
3971 tg3_enable_register_access(tp);
3973 /* Restore the CLKREQ setting. */
3974 if (tg3_flag(tp, CLKREQ_BUG))
3975 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3976 PCI_EXP_LNKCTL_CLKREQ_EN);
3978 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3979 tw32(TG3PCI_MISC_HOST_CTRL,
3980 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3982 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3983 tg3_flag(tp, WOL_ENABLE);
3985 if (tg3_flag(tp, USE_PHYLIB)) {
3986 do_low_power = false;
3987 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3988 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3989 struct phy_device *phydev;
3990 u32 phyid, advertising;
3992 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3994 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3996 tp->link_config.speed = phydev->speed;
3997 tp->link_config.duplex = phydev->duplex;
3998 tp->link_config.autoneg = phydev->autoneg;
3999 tp->link_config.advertising = phydev->advertising;
4001 advertising = ADVERTISED_TP |
4003 ADVERTISED_Autoneg |
4004 ADVERTISED_10baseT_Half;
4006 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4007 if (tg3_flag(tp, WOL_SPEED_100MB))
4009 ADVERTISED_100baseT_Half |
4010 ADVERTISED_100baseT_Full |
4011 ADVERTISED_10baseT_Full;
4013 advertising |= ADVERTISED_10baseT_Full;
4016 phydev->advertising = advertising;
4018 phy_start_aneg(phydev);
4020 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4021 if (phyid != PHY_ID_BCMAC131) {
4022 phyid &= PHY_BCM_OUI_MASK;
4023 if (phyid == PHY_BCM_OUI_1 ||
4024 phyid == PHY_BCM_OUI_2 ||
4025 phyid == PHY_BCM_OUI_3)
4026 do_low_power = true;
4030 do_low_power = true;
4032 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4033 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4035 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4036 tg3_setup_phy(tp, false);
4039 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4042 val = tr32(GRC_VCPU_EXT_CTRL);
4043 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4044 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4048 for (i = 0; i < 200; i++) {
4049 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4050 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4055 if (tg3_flag(tp, WOL_CAP))
4056 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4057 WOL_DRV_STATE_SHUTDOWN |
4061 if (device_should_wake) {
4064 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4066 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4067 tg3_phy_auxctl_write(tp,
4068 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4069 MII_TG3_AUXCTL_PCTL_WOL_EN |
4070 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4071 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4075 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4076 mac_mode = MAC_MODE_PORT_MODE_GMII;
4077 else if (tp->phy_flags &
4078 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4079 if (tp->link_config.active_speed == SPEED_1000)
4080 mac_mode = MAC_MODE_PORT_MODE_GMII;
4082 mac_mode = MAC_MODE_PORT_MODE_MII;
4084 mac_mode = MAC_MODE_PORT_MODE_MII;
4086 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4087 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4088 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4089 SPEED_100 : SPEED_10;
4090 if (tg3_5700_link_polarity(tp, speed))
4091 mac_mode |= MAC_MODE_LINK_POLARITY;
4093 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4096 mac_mode = MAC_MODE_PORT_MODE_TBI;
4099 if (!tg3_flag(tp, 5750_PLUS))
4100 tw32(MAC_LED_CTRL, tp->led_ctrl);
4102 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4103 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4104 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4105 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4107 if (tg3_flag(tp, ENABLE_APE))
4108 mac_mode |= MAC_MODE_APE_TX_EN |
4109 MAC_MODE_APE_RX_EN |
4110 MAC_MODE_TDE_ENABLE;
4112 tw32_f(MAC_MODE, mac_mode);
4115 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4119 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4120 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4121 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4124 base_val = tp->pci_clock_ctrl;
4125 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4126 CLOCK_CTRL_TXCLK_DISABLE);
4128 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4129 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4130 } else if (tg3_flag(tp, 5780_CLASS) ||
4131 tg3_flag(tp, CPMU_PRESENT) ||
4132 tg3_asic_rev(tp) == ASIC_REV_5906) {
4134 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4135 u32 newbits1, newbits2;
4137 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4138 tg3_asic_rev(tp) == ASIC_REV_5701) {
4139 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4140 CLOCK_CTRL_TXCLK_DISABLE |
4142 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4143 } else if (tg3_flag(tp, 5705_PLUS)) {
4144 newbits1 = CLOCK_CTRL_625_CORE;
4145 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4147 newbits1 = CLOCK_CTRL_ALTCLK;
4148 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4151 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4154 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4157 if (!tg3_flag(tp, 5705_PLUS)) {
4160 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4161 tg3_asic_rev(tp) == ASIC_REV_5701) {
4162 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4163 CLOCK_CTRL_TXCLK_DISABLE |
4164 CLOCK_CTRL_44MHZ_CORE);
4166 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4169 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4170 tp->pci_clock_ctrl | newbits3, 40);
4174 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4175 tg3_power_down_phy(tp, do_low_power);
4177 tg3_frob_aux_power(tp, true);
4179 /* Workaround for unstable PLL clock */
4180 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4181 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4182 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4183 u32 val = tr32(0x7d00);
4185 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4187 if (!tg3_flag(tp, ENABLE_ASF)) {
4190 err = tg3_nvram_lock(tp);
4191 tg3_halt_cpu(tp, RX_CPU_BASE);
4193 tg3_nvram_unlock(tp);
4197 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4199 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4204 static void tg3_power_down(struct tg3 *tp)
4206 tg3_power_down_prepare(tp);
4208 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4209 pci_set_power_state(tp->pdev, PCI_D3hot);
4212 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4214 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4215 case MII_TG3_AUX_STAT_10HALF:
4217 *duplex = DUPLEX_HALF;
4220 case MII_TG3_AUX_STAT_10FULL:
4222 *duplex = DUPLEX_FULL;
4225 case MII_TG3_AUX_STAT_100HALF:
4227 *duplex = DUPLEX_HALF;
4230 case MII_TG3_AUX_STAT_100FULL:
4232 *duplex = DUPLEX_FULL;
4235 case MII_TG3_AUX_STAT_1000HALF:
4236 *speed = SPEED_1000;
4237 *duplex = DUPLEX_HALF;
4240 case MII_TG3_AUX_STAT_1000FULL:
4241 *speed = SPEED_1000;
4242 *duplex = DUPLEX_FULL;
4246 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4247 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4249 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4253 *speed = SPEED_UNKNOWN;
4254 *duplex = DUPLEX_UNKNOWN;
4259 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4264 new_adv = ADVERTISE_CSMA;
4265 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4266 new_adv |= mii_advertise_flowctrl(flowctrl);
4268 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4272 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4273 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4275 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4276 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4277 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4279 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4284 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4287 tw32(TG3_CPMU_EEE_MODE,
4288 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4290 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4295 /* Advertise 100-BaseTX EEE ability */
4296 if (advertise & ADVERTISED_100baseT_Full)
4297 val |= MDIO_AN_EEE_ADV_100TX;
4298 /* Advertise 1000-BaseT EEE ability */
4299 if (advertise & ADVERTISED_1000baseT_Full)
4300 val |= MDIO_AN_EEE_ADV_1000T;
4302 if (!tp->eee.eee_enabled) {
4304 tp->eee.advertised = 0;
4306 tp->eee.advertised = advertise &
4307 (ADVERTISED_100baseT_Full |
4308 ADVERTISED_1000baseT_Full);
4311 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4315 switch (tg3_asic_rev(tp)) {
4317 case ASIC_REV_57765:
4318 case ASIC_REV_57766:
4320 /* If we advertised any eee advertisements above... */
4322 val = MII_TG3_DSP_TAP26_ALNOKO |
4323 MII_TG3_DSP_TAP26_RMRXSTO |
4324 MII_TG3_DSP_TAP26_OPCSINPT;
4325 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4329 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4330 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4331 MII_TG3_DSP_CH34TP2_HIBW01);
4334 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4343 static void tg3_phy_copper_begin(struct tg3 *tp)
4345 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4346 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4349 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4350 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4351 adv = ADVERTISED_10baseT_Half |
4352 ADVERTISED_10baseT_Full;
4353 if (tg3_flag(tp, WOL_SPEED_100MB))
4354 adv |= ADVERTISED_100baseT_Half |
4355 ADVERTISED_100baseT_Full;
4356 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4357 adv |= ADVERTISED_1000baseT_Half |
4358 ADVERTISED_1000baseT_Full;
4360 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4362 adv = tp->link_config.advertising;
4363 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4364 adv &= ~(ADVERTISED_1000baseT_Half |
4365 ADVERTISED_1000baseT_Full);
4367 fc = tp->link_config.flowctrl;
4370 tg3_phy_autoneg_cfg(tp, adv, fc);
4372 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4373 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4374 /* Normally during power down we want to autonegotiate
4375 * the lowest possible speed for WOL. However, to avoid
4376 * link flap, we leave it untouched.
4381 tg3_writephy(tp, MII_BMCR,
4382 BMCR_ANENABLE | BMCR_ANRESTART);
4385 u32 bmcr, orig_bmcr;
4387 tp->link_config.active_speed = tp->link_config.speed;
4388 tp->link_config.active_duplex = tp->link_config.duplex;
4390 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4391 /* With autoneg disabled, 5715 only links up when the
4392 * advertisement register has the configured speed
4395 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4399 switch (tp->link_config.speed) {
4405 bmcr |= BMCR_SPEED100;
4409 bmcr |= BMCR_SPEED1000;
4413 if (tp->link_config.duplex == DUPLEX_FULL)
4414 bmcr |= BMCR_FULLDPLX;
4416 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4417 (bmcr != orig_bmcr)) {
4418 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4419 for (i = 0; i < 1500; i++) {
4423 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4424 tg3_readphy(tp, MII_BMSR, &tmp))
4426 if (!(tmp & BMSR_LSTATUS)) {
4431 tg3_writephy(tp, MII_BMCR, bmcr);
4437 static int tg3_phy_pull_config(struct tg3 *tp)
4442 err = tg3_readphy(tp, MII_BMCR, &val);
4446 if (!(val & BMCR_ANENABLE)) {
4447 tp->link_config.autoneg = AUTONEG_DISABLE;
4448 tp->link_config.advertising = 0;
4449 tg3_flag_clear(tp, PAUSE_AUTONEG);
4453 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4455 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4458 tp->link_config.speed = SPEED_10;
4461 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4464 tp->link_config.speed = SPEED_100;
4466 case BMCR_SPEED1000:
4467 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4468 tp->link_config.speed = SPEED_1000;
4476 if (val & BMCR_FULLDPLX)
4477 tp->link_config.duplex = DUPLEX_FULL;
4479 tp->link_config.duplex = DUPLEX_HALF;
4481 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4487 tp->link_config.autoneg = AUTONEG_ENABLE;
4488 tp->link_config.advertising = ADVERTISED_Autoneg;
4489 tg3_flag_set(tp, PAUSE_AUTONEG);
4491 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4494 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4498 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4499 tp->link_config.advertising |= adv | ADVERTISED_TP;
4501 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4503 tp->link_config.advertising |= ADVERTISED_FIBRE;
4506 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4509 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4510 err = tg3_readphy(tp, MII_CTRL1000, &val);
4514 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4516 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4520 adv = tg3_decode_flowctrl_1000X(val);
4521 tp->link_config.flowctrl = adv;
4523 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4524 adv = mii_adv_to_ethtool_adv_x(val);
4527 tp->link_config.advertising |= adv;
4534 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4538 /* Turn off tap power management. */
4539 /* Set Extended packet length bit */
4540 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4542 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4543 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4544 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4545 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4546 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4553 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4555 struct ethtool_eee eee;
4557 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4560 tg3_eee_pull_config(tp, &eee);
4562 if (tp->eee.eee_enabled) {
4563 if (tp->eee.advertised != eee.advertised ||
4564 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4565 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4568 /* EEE is disabled but we're advertising */
4576 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4578 u32 advmsk, tgtadv, advertising;
4580 advertising = tp->link_config.advertising;
4581 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4583 advmsk = ADVERTISE_ALL;
4584 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4585 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4586 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4589 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4592 if ((*lcladv & advmsk) != tgtadv)
4595 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4598 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4600 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4604 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4605 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4606 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4607 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4608 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4610 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4613 if (tg3_ctrl != tgtadv)
4620 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4624 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4627 if (tg3_readphy(tp, MII_STAT1000, &val))
4630 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4633 if (tg3_readphy(tp, MII_LPA, rmtadv))
4636 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4637 tp->link_config.rmt_adv = lpeth;
4642 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4644 if (curr_link_up != tp->link_up) {
4646 netif_carrier_on(tp->dev);
4648 netif_carrier_off(tp->dev);
4649 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4650 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4653 tg3_link_report(tp);
4660 static void tg3_clear_mac_status(struct tg3 *tp)
4665 MAC_STATUS_SYNC_CHANGED |
4666 MAC_STATUS_CFG_CHANGED |
4667 MAC_STATUS_MI_COMPLETION |
4668 MAC_STATUS_LNKSTATE_CHANGED);
4672 static void tg3_setup_eee(struct tg3 *tp)
4676 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4677 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4678 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4679 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4681 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4683 tw32_f(TG3_CPMU_EEE_CTRL,
4684 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4686 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4687 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4688 TG3_CPMU_EEEMD_LPI_IN_RX |
4689 TG3_CPMU_EEEMD_EEE_ENABLE;
4691 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4692 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4694 if (tg3_flag(tp, ENABLE_APE))
4695 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4697 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4699 tw32_f(TG3_CPMU_EEE_DBTMR1,
4700 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4701 (tp->eee.tx_lpi_timer & 0xffff));
4703 tw32_f(TG3_CPMU_EEE_DBTMR2,
4704 TG3_CPMU_DBTMR2_APE_TX_2047US |
4705 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4708 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4710 bool current_link_up;
4712 u32 lcl_adv, rmt_adv;
4717 tg3_clear_mac_status(tp);
4719 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4721 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4725 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4727 /* Some third-party PHYs need to be reset on link going
4730 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4731 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4732 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4734 tg3_readphy(tp, MII_BMSR, &bmsr);
4735 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4736 !(bmsr & BMSR_LSTATUS))
4742 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4743 tg3_readphy(tp, MII_BMSR, &bmsr);
4744 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4745 !tg3_flag(tp, INIT_COMPLETE))
4748 if (!(bmsr & BMSR_LSTATUS)) {
4749 err = tg3_init_5401phy_dsp(tp);
4753 tg3_readphy(tp, MII_BMSR, &bmsr);
4754 for (i = 0; i < 1000; i++) {
4756 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4757 (bmsr & BMSR_LSTATUS)) {
4763 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4764 TG3_PHY_REV_BCM5401_B0 &&
4765 !(bmsr & BMSR_LSTATUS) &&
4766 tp->link_config.active_speed == SPEED_1000) {
4767 err = tg3_phy_reset(tp);
4769 err = tg3_init_5401phy_dsp(tp);
4774 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4775 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4776 /* 5701 {A0,B0} CRC bug workaround */
4777 tg3_writephy(tp, 0x15, 0x0a75);
4778 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4779 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4780 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4783 /* Clear pending interrupts... */
4784 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4785 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4787 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4788 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4789 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4790 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4792 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4793 tg3_asic_rev(tp) == ASIC_REV_5701) {
4794 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4795 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4796 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4798 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4801 current_link_up = false;
4802 current_speed = SPEED_UNKNOWN;
4803 current_duplex = DUPLEX_UNKNOWN;
4804 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4805 tp->link_config.rmt_adv = 0;
4807 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4808 err = tg3_phy_auxctl_read(tp,
4809 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4811 if (!err && !(val & (1 << 10))) {
4812 tg3_phy_auxctl_write(tp,
4813 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4820 for (i = 0; i < 100; i++) {
4821 tg3_readphy(tp, MII_BMSR, &bmsr);
4822 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4823 (bmsr & BMSR_LSTATUS))
4828 if (bmsr & BMSR_LSTATUS) {
4831 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4832 for (i = 0; i < 2000; i++) {
4834 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4839 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4844 for (i = 0; i < 200; i++) {
4845 tg3_readphy(tp, MII_BMCR, &bmcr);
4846 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4848 if (bmcr && bmcr != 0x7fff)
4856 tp->link_config.active_speed = current_speed;
4857 tp->link_config.active_duplex = current_duplex;
4859 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4860 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4862 if ((bmcr & BMCR_ANENABLE) &&
4864 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4865 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4866 current_link_up = true;
4868 /* EEE settings changes take effect only after a phy
4869 * reset. If we have skipped a reset due to Link Flap
4870 * Avoidance being enabled, do it now.
4872 if (!eee_config_ok &&
4873 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4879 if (!(bmcr & BMCR_ANENABLE) &&
4880 tp->link_config.speed == current_speed &&
4881 tp->link_config.duplex == current_duplex) {
4882 current_link_up = true;
4886 if (current_link_up &&
4887 tp->link_config.active_duplex == DUPLEX_FULL) {
4890 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4891 reg = MII_TG3_FET_GEN_STAT;
4892 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4894 reg = MII_TG3_EXT_STAT;
4895 bit = MII_TG3_EXT_STAT_MDIX;
4898 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4899 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4901 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4906 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4907 tg3_phy_copper_begin(tp);
4909 if (tg3_flag(tp, ROBOSWITCH)) {
4910 current_link_up = true;
4911 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4912 current_speed = SPEED_1000;
4913 current_duplex = DUPLEX_FULL;
4914 tp->link_config.active_speed = current_speed;
4915 tp->link_config.active_duplex = current_duplex;
4918 tg3_readphy(tp, MII_BMSR, &bmsr);
4919 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4920 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4921 current_link_up = true;
4924 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4925 if (current_link_up) {
4926 if (tp->link_config.active_speed == SPEED_100 ||
4927 tp->link_config.active_speed == SPEED_10)
4928 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4930 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4931 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4932 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4934 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4936 /* In order for the 5750 core in BCM4785 chip to work properly
4937 * in RGMII mode, the Led Control Register must be set up.
4939 if (tg3_flag(tp, RGMII_MODE)) {
4940 u32 led_ctrl = tr32(MAC_LED_CTRL);
4941 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4943 if (tp->link_config.active_speed == SPEED_10)
4944 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4945 else if (tp->link_config.active_speed == SPEED_100)
4946 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4947 LED_CTRL_100MBPS_ON);
4948 else if (tp->link_config.active_speed == SPEED_1000)
4949 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4950 LED_CTRL_1000MBPS_ON);
4952 tw32(MAC_LED_CTRL, led_ctrl);
4956 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4957 if (tp->link_config.active_duplex == DUPLEX_HALF)
4958 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4960 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4961 if (current_link_up &&
4962 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4963 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4965 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4968 /* ??? Without this setting Netgear GA302T PHY does not
4969 * ??? send/receive packets...
4971 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4972 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4973 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4974 tw32_f(MAC_MI_MODE, tp->mi_mode);
4978 tw32_f(MAC_MODE, tp->mac_mode);
4981 tg3_phy_eee_adjust(tp, current_link_up);
4983 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4984 /* Polled via timer. */
4985 tw32_f(MAC_EVENT, 0);
4987 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4991 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4993 tp->link_config.active_speed == SPEED_1000 &&
4994 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4997 (MAC_STATUS_SYNC_CHANGED |
4998 MAC_STATUS_CFG_CHANGED));
5001 NIC_SRAM_FIRMWARE_MBOX,
5002 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5005 /* Prevent send BD corruption. */
5006 if (tg3_flag(tp, CLKREQ_BUG)) {
5007 if (tp->link_config.active_speed == SPEED_100 ||
5008 tp->link_config.active_speed == SPEED_10)
5009 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5010 PCI_EXP_LNKCTL_CLKREQ_EN);
5012 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5013 PCI_EXP_LNKCTL_CLKREQ_EN);
5016 tg3_test_and_report_link_chg(tp, current_link_up);
5021 struct tg3_fiber_aneginfo {
5023 #define ANEG_STATE_UNKNOWN 0
5024 #define ANEG_STATE_AN_ENABLE 1
5025 #define ANEG_STATE_RESTART_INIT 2
5026 #define ANEG_STATE_RESTART 3
5027 #define ANEG_STATE_DISABLE_LINK_OK 4
5028 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5029 #define ANEG_STATE_ABILITY_DETECT 6
5030 #define ANEG_STATE_ACK_DETECT_INIT 7
5031 #define ANEG_STATE_ACK_DETECT 8
5032 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5033 #define ANEG_STATE_COMPLETE_ACK 10
5034 #define ANEG_STATE_IDLE_DETECT_INIT 11
5035 #define ANEG_STATE_IDLE_DETECT 12
5036 #define ANEG_STATE_LINK_OK 13
5037 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5038 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5041 #define MR_AN_ENABLE 0x00000001
5042 #define MR_RESTART_AN 0x00000002
5043 #define MR_AN_COMPLETE 0x00000004
5044 #define MR_PAGE_RX 0x00000008
5045 #define MR_NP_LOADED 0x00000010
5046 #define MR_TOGGLE_TX 0x00000020
5047 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5048 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5049 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5050 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5051 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5052 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5053 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5054 #define MR_TOGGLE_RX 0x00002000
5055 #define MR_NP_RX 0x00004000
5057 #define MR_LINK_OK 0x80000000
5059 unsigned long link_time, cur_time;
5061 u32 ability_match_cfg;
5062 int ability_match_count;
5064 char ability_match, idle_match, ack_match;
5066 u32 txconfig, rxconfig;
5067 #define ANEG_CFG_NP 0x00000080
5068 #define ANEG_CFG_ACK 0x00000040
5069 #define ANEG_CFG_RF2 0x00000020
5070 #define ANEG_CFG_RF1 0x00000010
5071 #define ANEG_CFG_PS2 0x00000001
5072 #define ANEG_CFG_PS1 0x00008000
5073 #define ANEG_CFG_HD 0x00004000
5074 #define ANEG_CFG_FD 0x00002000
5075 #define ANEG_CFG_INVAL 0x00001f06
5080 #define ANEG_TIMER_ENAB 2
5081 #define ANEG_FAILED -1
5083 #define ANEG_STATE_SETTLE_TIME 10000
5085 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5086 struct tg3_fiber_aneginfo *ap)
5089 unsigned long delta;
5093 if (ap->state == ANEG_STATE_UNKNOWN) {
5097 ap->ability_match_cfg = 0;
5098 ap->ability_match_count = 0;
5099 ap->ability_match = 0;
5105 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5106 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5108 if (rx_cfg_reg != ap->ability_match_cfg) {
5109 ap->ability_match_cfg = rx_cfg_reg;
5110 ap->ability_match = 0;
5111 ap->ability_match_count = 0;
5113 if (++ap->ability_match_count > 1) {
5114 ap->ability_match = 1;
5115 ap->ability_match_cfg = rx_cfg_reg;
5118 if (rx_cfg_reg & ANEG_CFG_ACK)
5126 ap->ability_match_cfg = 0;
5127 ap->ability_match_count = 0;
5128 ap->ability_match = 0;
5134 ap->rxconfig = rx_cfg_reg;
5137 switch (ap->state) {
5138 case ANEG_STATE_UNKNOWN:
5139 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5140 ap->state = ANEG_STATE_AN_ENABLE;
5143 case ANEG_STATE_AN_ENABLE:
5144 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5145 if (ap->flags & MR_AN_ENABLE) {
5148 ap->ability_match_cfg = 0;
5149 ap->ability_match_count = 0;
5150 ap->ability_match = 0;
5154 ap->state = ANEG_STATE_RESTART_INIT;
5156 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5160 case ANEG_STATE_RESTART_INIT:
5161 ap->link_time = ap->cur_time;
5162 ap->flags &= ~(MR_NP_LOADED);
5164 tw32(MAC_TX_AUTO_NEG, 0);
5165 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5166 tw32_f(MAC_MODE, tp->mac_mode);
5169 ret = ANEG_TIMER_ENAB;
5170 ap->state = ANEG_STATE_RESTART;
5173 case ANEG_STATE_RESTART:
5174 delta = ap->cur_time - ap->link_time;
5175 if (delta > ANEG_STATE_SETTLE_TIME)
5176 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5178 ret = ANEG_TIMER_ENAB;
5181 case ANEG_STATE_DISABLE_LINK_OK:
5185 case ANEG_STATE_ABILITY_DETECT_INIT:
5186 ap->flags &= ~(MR_TOGGLE_TX);
5187 ap->txconfig = ANEG_CFG_FD;
5188 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5189 if (flowctrl & ADVERTISE_1000XPAUSE)
5190 ap->txconfig |= ANEG_CFG_PS1;
5191 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5192 ap->txconfig |= ANEG_CFG_PS2;
5193 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5194 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5195 tw32_f(MAC_MODE, tp->mac_mode);
5198 ap->state = ANEG_STATE_ABILITY_DETECT;
5201 case ANEG_STATE_ABILITY_DETECT:
5202 if (ap->ability_match != 0 && ap->rxconfig != 0)
5203 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5206 case ANEG_STATE_ACK_DETECT_INIT:
5207 ap->txconfig |= ANEG_CFG_ACK;
5208 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5209 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5210 tw32_f(MAC_MODE, tp->mac_mode);
5213 ap->state = ANEG_STATE_ACK_DETECT;
5216 case ANEG_STATE_ACK_DETECT:
5217 if (ap->ack_match != 0) {
5218 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5219 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5220 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5222 ap->state = ANEG_STATE_AN_ENABLE;
5224 } else if (ap->ability_match != 0 &&
5225 ap->rxconfig == 0) {
5226 ap->state = ANEG_STATE_AN_ENABLE;
5230 case ANEG_STATE_COMPLETE_ACK_INIT:
5231 if (ap->rxconfig & ANEG_CFG_INVAL) {
5235 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5236 MR_LP_ADV_HALF_DUPLEX |
5237 MR_LP_ADV_SYM_PAUSE |
5238 MR_LP_ADV_ASYM_PAUSE |
5239 MR_LP_ADV_REMOTE_FAULT1 |
5240 MR_LP_ADV_REMOTE_FAULT2 |
5241 MR_LP_ADV_NEXT_PAGE |
5244 if (ap->rxconfig & ANEG_CFG_FD)
5245 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5246 if (ap->rxconfig & ANEG_CFG_HD)
5247 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5248 if (ap->rxconfig & ANEG_CFG_PS1)
5249 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5250 if (ap->rxconfig & ANEG_CFG_PS2)
5251 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5252 if (ap->rxconfig & ANEG_CFG_RF1)
5253 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5254 if (ap->rxconfig & ANEG_CFG_RF2)
5255 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5256 if (ap->rxconfig & ANEG_CFG_NP)
5257 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5259 ap->link_time = ap->cur_time;
5261 ap->flags ^= (MR_TOGGLE_TX);
5262 if (ap->rxconfig & 0x0008)
5263 ap->flags |= MR_TOGGLE_RX;
5264 if (ap->rxconfig & ANEG_CFG_NP)
5265 ap->flags |= MR_NP_RX;
5266 ap->flags |= MR_PAGE_RX;
5268 ap->state = ANEG_STATE_COMPLETE_ACK;
5269 ret = ANEG_TIMER_ENAB;
5272 case ANEG_STATE_COMPLETE_ACK:
5273 if (ap->ability_match != 0 &&
5274 ap->rxconfig == 0) {
5275 ap->state = ANEG_STATE_AN_ENABLE;
5278 delta = ap->cur_time - ap->link_time;
5279 if (delta > ANEG_STATE_SETTLE_TIME) {
5280 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5281 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5283 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5284 !(ap->flags & MR_NP_RX)) {
5285 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5293 case ANEG_STATE_IDLE_DETECT_INIT:
5294 ap->link_time = ap->cur_time;
5295 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5296 tw32_f(MAC_MODE, tp->mac_mode);
5299 ap->state = ANEG_STATE_IDLE_DETECT;
5300 ret = ANEG_TIMER_ENAB;
5303 case ANEG_STATE_IDLE_DETECT:
5304 if (ap->ability_match != 0 &&
5305 ap->rxconfig == 0) {
5306 ap->state = ANEG_STATE_AN_ENABLE;
5309 delta = ap->cur_time - ap->link_time;
5310 if (delta > ANEG_STATE_SETTLE_TIME) {
5311 /* XXX another gem from the Broadcom driver :( */
5312 ap->state = ANEG_STATE_LINK_OK;
5316 case ANEG_STATE_LINK_OK:
5317 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5321 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5322 /* ??? unimplemented */
5325 case ANEG_STATE_NEXT_PAGE_WAIT:
5326 /* ??? unimplemented */
5337 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5340 struct tg3_fiber_aneginfo aninfo;
5341 int status = ANEG_FAILED;
5345 tw32_f(MAC_TX_AUTO_NEG, 0);
5347 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5348 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5351 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5354 memset(&aninfo, 0, sizeof(aninfo));
5355 aninfo.flags |= MR_AN_ENABLE;
5356 aninfo.state = ANEG_STATE_UNKNOWN;
5357 aninfo.cur_time = 0;
5359 while (++tick < 195000) {
5360 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5361 if (status == ANEG_DONE || status == ANEG_FAILED)
5367 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5368 tw32_f(MAC_MODE, tp->mac_mode);
5371 *txflags = aninfo.txconfig;
5372 *rxflags = aninfo.flags;
5374 if (status == ANEG_DONE &&
5375 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5376 MR_LP_ADV_FULL_DUPLEX)))
5382 static void tg3_init_bcm8002(struct tg3 *tp)
5384 u32 mac_status = tr32(MAC_STATUS);
5387 /* Reset when initting first time or we have a link. */
5388 if (tg3_flag(tp, INIT_COMPLETE) &&
5389 !(mac_status & MAC_STATUS_PCS_SYNCED))
5392 /* Set PLL lock range. */
5393 tg3_writephy(tp, 0x16, 0x8007);
5396 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5398 /* Wait for reset to complete. */
5399 /* XXX schedule_timeout() ... */
5400 for (i = 0; i < 500; i++)
5403 /* Config mode; select PMA/Ch 1 regs. */
5404 tg3_writephy(tp, 0x10, 0x8411);
5406 /* Enable auto-lock and comdet, select txclk for tx. */
5407 tg3_writephy(tp, 0x11, 0x0a10);
5409 tg3_writephy(tp, 0x18, 0x00a0);
5410 tg3_writephy(tp, 0x16, 0x41ff);
5412 /* Assert and deassert POR. */
5413 tg3_writephy(tp, 0x13, 0x0400);
5415 tg3_writephy(tp, 0x13, 0x0000);
5417 tg3_writephy(tp, 0x11, 0x0a50);
5419 tg3_writephy(tp, 0x11, 0x0a10);
5421 /* Wait for signal to stabilize */
5422 /* XXX schedule_timeout() ... */
5423 for (i = 0; i < 15000; i++)
5426 /* Deselect the channel register so we can read the PHYID
5429 tg3_writephy(tp, 0x10, 0x8011);
5432 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5435 bool current_link_up;
5436 u32 sg_dig_ctrl, sg_dig_status;
5437 u32 serdes_cfg, expected_sg_dig_ctrl;
5438 int workaround, port_a;
5441 expected_sg_dig_ctrl = 0;
5444 current_link_up = false;
5446 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5447 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5449 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5452 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5453 /* preserve bits 20-23 for voltage regulator */
5454 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5457 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5459 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5460 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5462 u32 val = serdes_cfg;
5468 tw32_f(MAC_SERDES_CFG, val);
5471 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5473 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5474 tg3_setup_flow_control(tp, 0, 0);
5475 current_link_up = true;
5480 /* Want auto-negotiation. */
5481 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5483 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5484 if (flowctrl & ADVERTISE_1000XPAUSE)
5485 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5486 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5487 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5489 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5490 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5491 tp->serdes_counter &&
5492 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5493 MAC_STATUS_RCVD_CFG)) ==
5494 MAC_STATUS_PCS_SYNCED)) {
5495 tp->serdes_counter--;
5496 current_link_up = true;
5501 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5502 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5504 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5506 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5507 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5508 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5509 MAC_STATUS_SIGNAL_DET)) {
5510 sg_dig_status = tr32(SG_DIG_STATUS);
5511 mac_status = tr32(MAC_STATUS);
5513 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5514 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5515 u32 local_adv = 0, remote_adv = 0;
5517 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5518 local_adv |= ADVERTISE_1000XPAUSE;
5519 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5520 local_adv |= ADVERTISE_1000XPSE_ASYM;
5522 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5523 remote_adv |= LPA_1000XPAUSE;
5524 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5525 remote_adv |= LPA_1000XPAUSE_ASYM;
5527 tp->link_config.rmt_adv =
5528 mii_adv_to_ethtool_adv_x(remote_adv);
5530 tg3_setup_flow_control(tp, local_adv, remote_adv);
5531 current_link_up = true;
5532 tp->serdes_counter = 0;
5533 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5534 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5535 if (tp->serdes_counter)
5536 tp->serdes_counter--;
5539 u32 val = serdes_cfg;
5546 tw32_f(MAC_SERDES_CFG, val);
5549 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5552 /* Link parallel detection - link is up */
5553 /* only if we have PCS_SYNC and not */
5554 /* receiving config code words */
5555 mac_status = tr32(MAC_STATUS);
5556 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5557 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5558 tg3_setup_flow_control(tp, 0, 0);
5559 current_link_up = true;
5561 TG3_PHYFLG_PARALLEL_DETECT;
5562 tp->serdes_counter =
5563 SERDES_PARALLEL_DET_TIMEOUT;
5565 goto restart_autoneg;
5569 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5570 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5574 return current_link_up;
5577 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5579 bool current_link_up = false;
5581 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5584 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5585 u32 txflags, rxflags;
5588 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5589 u32 local_adv = 0, remote_adv = 0;
5591 if (txflags & ANEG_CFG_PS1)
5592 local_adv |= ADVERTISE_1000XPAUSE;
5593 if (txflags & ANEG_CFG_PS2)
5594 local_adv |= ADVERTISE_1000XPSE_ASYM;
5596 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5597 remote_adv |= LPA_1000XPAUSE;
5598 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5599 remote_adv |= LPA_1000XPAUSE_ASYM;
5601 tp->link_config.rmt_adv =
5602 mii_adv_to_ethtool_adv_x(remote_adv);
5604 tg3_setup_flow_control(tp, local_adv, remote_adv);
5606 current_link_up = true;
5608 for (i = 0; i < 30; i++) {
5611 (MAC_STATUS_SYNC_CHANGED |
5612 MAC_STATUS_CFG_CHANGED));
5614 if ((tr32(MAC_STATUS) &
5615 (MAC_STATUS_SYNC_CHANGED |
5616 MAC_STATUS_CFG_CHANGED)) == 0)
5620 mac_status = tr32(MAC_STATUS);
5621 if (!current_link_up &&
5622 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5623 !(mac_status & MAC_STATUS_RCVD_CFG))
5624 current_link_up = true;
5626 tg3_setup_flow_control(tp, 0, 0);
5628 /* Forcing 1000FD link up. */
5629 current_link_up = true;
5631 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5634 tw32_f(MAC_MODE, tp->mac_mode);
5639 return current_link_up;
5642 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5645 u16 orig_active_speed;
5646 u8 orig_active_duplex;
5648 bool current_link_up;
5651 orig_pause_cfg = tp->link_config.active_flowctrl;
5652 orig_active_speed = tp->link_config.active_speed;
5653 orig_active_duplex = tp->link_config.active_duplex;
5655 if (!tg3_flag(tp, HW_AUTONEG) &&
5657 tg3_flag(tp, INIT_COMPLETE)) {
5658 mac_status = tr32(MAC_STATUS);
5659 mac_status &= (MAC_STATUS_PCS_SYNCED |
5660 MAC_STATUS_SIGNAL_DET |
5661 MAC_STATUS_CFG_CHANGED |
5662 MAC_STATUS_RCVD_CFG);
5663 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5664 MAC_STATUS_SIGNAL_DET)) {
5665 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5666 MAC_STATUS_CFG_CHANGED));
5671 tw32_f(MAC_TX_AUTO_NEG, 0);
5673 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5674 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5675 tw32_f(MAC_MODE, tp->mac_mode);
5678 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5679 tg3_init_bcm8002(tp);
5681 /* Enable link change event even when serdes polling. */
5682 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5685 current_link_up = false;
5686 tp->link_config.rmt_adv = 0;
5687 mac_status = tr32(MAC_STATUS);
5689 if (tg3_flag(tp, HW_AUTONEG))
5690 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5692 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5694 tp->napi[0].hw_status->status =
5695 (SD_STATUS_UPDATED |
5696 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5698 for (i = 0; i < 100; i++) {
5699 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5700 MAC_STATUS_CFG_CHANGED));
5702 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5703 MAC_STATUS_CFG_CHANGED |
5704 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5708 mac_status = tr32(MAC_STATUS);
5709 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5710 current_link_up = false;
5711 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5712 tp->serdes_counter == 0) {
5713 tw32_f(MAC_MODE, (tp->mac_mode |
5714 MAC_MODE_SEND_CONFIGS));
5716 tw32_f(MAC_MODE, tp->mac_mode);
5720 if (current_link_up) {
5721 tp->link_config.active_speed = SPEED_1000;
5722 tp->link_config.active_duplex = DUPLEX_FULL;
5723 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5724 LED_CTRL_LNKLED_OVERRIDE |
5725 LED_CTRL_1000MBPS_ON));
5727 tp->link_config.active_speed = SPEED_UNKNOWN;
5728 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5729 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5730 LED_CTRL_LNKLED_OVERRIDE |
5731 LED_CTRL_TRAFFIC_OVERRIDE));
5734 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5735 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5736 if (orig_pause_cfg != now_pause_cfg ||
5737 orig_active_speed != tp->link_config.active_speed ||
5738 orig_active_duplex != tp->link_config.active_duplex)
5739 tg3_link_report(tp);
5745 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5749 u16 current_speed = SPEED_UNKNOWN;
5750 u8 current_duplex = DUPLEX_UNKNOWN;
5751 bool current_link_up = false;
5752 u32 local_adv, remote_adv, sgsr;
5754 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5755 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5756 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5757 (sgsr & SERDES_TG3_SGMII_MODE)) {
5762 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5764 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5765 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5767 current_link_up = true;
5768 if (sgsr & SERDES_TG3_SPEED_1000) {
5769 current_speed = SPEED_1000;
5770 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5771 } else if (sgsr & SERDES_TG3_SPEED_100) {
5772 current_speed = SPEED_100;
5773 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5775 current_speed = SPEED_10;
5776 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5779 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5780 current_duplex = DUPLEX_FULL;
5782 current_duplex = DUPLEX_HALF;
5785 tw32_f(MAC_MODE, tp->mac_mode);
5788 tg3_clear_mac_status(tp);
5790 goto fiber_setup_done;
5793 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5794 tw32_f(MAC_MODE, tp->mac_mode);
5797 tg3_clear_mac_status(tp);
5802 tp->link_config.rmt_adv = 0;
5804 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5805 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5806 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5807 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5808 bmsr |= BMSR_LSTATUS;
5810 bmsr &= ~BMSR_LSTATUS;
5813 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5815 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5816 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5817 /* do nothing, just check for link up at the end */
5818 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5821 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5822 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5823 ADVERTISE_1000XPAUSE |
5824 ADVERTISE_1000XPSE_ASYM |
5827 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5828 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5830 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5831 tg3_writephy(tp, MII_ADVERTISE, newadv);
5832 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5833 tg3_writephy(tp, MII_BMCR, bmcr);
5835 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5836 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5837 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5844 bmcr &= ~BMCR_SPEED1000;
5845 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5847 if (tp->link_config.duplex == DUPLEX_FULL)
5848 new_bmcr |= BMCR_FULLDPLX;
5850 if (new_bmcr != bmcr) {
5851 /* BMCR_SPEED1000 is a reserved bit that needs
5852 * to be set on write.
5854 new_bmcr |= BMCR_SPEED1000;
5856 /* Force a linkdown */
5860 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5861 adv &= ~(ADVERTISE_1000XFULL |
5862 ADVERTISE_1000XHALF |
5864 tg3_writephy(tp, MII_ADVERTISE, adv);
5865 tg3_writephy(tp, MII_BMCR, bmcr |
5869 tg3_carrier_off(tp);
5871 tg3_writephy(tp, MII_BMCR, new_bmcr);
5873 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5874 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5875 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5876 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5877 bmsr |= BMSR_LSTATUS;
5879 bmsr &= ~BMSR_LSTATUS;
5881 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5885 if (bmsr & BMSR_LSTATUS) {
5886 current_speed = SPEED_1000;
5887 current_link_up = true;
5888 if (bmcr & BMCR_FULLDPLX)
5889 current_duplex = DUPLEX_FULL;
5891 current_duplex = DUPLEX_HALF;
5896 if (bmcr & BMCR_ANENABLE) {
5899 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5900 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5901 common = local_adv & remote_adv;
5902 if (common & (ADVERTISE_1000XHALF |
5903 ADVERTISE_1000XFULL)) {
5904 if (common & ADVERTISE_1000XFULL)
5905 current_duplex = DUPLEX_FULL;
5907 current_duplex = DUPLEX_HALF;
5909 tp->link_config.rmt_adv =
5910 mii_adv_to_ethtool_adv_x(remote_adv);
5911 } else if (!tg3_flag(tp, 5780_CLASS)) {
5912 /* Link is up via parallel detect */
5914 current_link_up = false;
5920 if (current_link_up && current_duplex == DUPLEX_FULL)
5921 tg3_setup_flow_control(tp, local_adv, remote_adv);
5923 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5924 if (tp->link_config.active_duplex == DUPLEX_HALF)
5925 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5927 tw32_f(MAC_MODE, tp->mac_mode);
5930 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5932 tp->link_config.active_speed = current_speed;
5933 tp->link_config.active_duplex = current_duplex;
5935 tg3_test_and_report_link_chg(tp, current_link_up);
5939 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5941 if (tp->serdes_counter) {
5942 /* Give autoneg time to complete. */
5943 tp->serdes_counter--;
5948 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5951 tg3_readphy(tp, MII_BMCR, &bmcr);
5952 if (bmcr & BMCR_ANENABLE) {
5955 /* Select shadow register 0x1f */
5956 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5957 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5959 /* Select expansion interrupt status register */
5960 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5961 MII_TG3_DSP_EXP1_INT_STAT);
5962 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5963 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5965 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5966 /* We have signal detect and not receiving
5967 * config code words, link is up by parallel
5971 bmcr &= ~BMCR_ANENABLE;
5972 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5973 tg3_writephy(tp, MII_BMCR, bmcr);
5974 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5977 } else if (tp->link_up &&
5978 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5979 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5982 /* Select expansion interrupt status register */
5983 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5984 MII_TG3_DSP_EXP1_INT_STAT);
5985 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5989 /* Config code words received, turn on autoneg. */
5990 tg3_readphy(tp, MII_BMCR, &bmcr);
5991 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5993 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5999 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6004 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6005 err = tg3_setup_fiber_phy(tp, force_reset);
6006 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6007 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6009 err = tg3_setup_copper_phy(tp, force_reset);
6011 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6014 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6015 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6017 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6022 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6023 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6024 tw32(GRC_MISC_CFG, val);
6027 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6028 (6 << TX_LENGTHS_IPG_SHIFT);
6029 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6030 tg3_asic_rev(tp) == ASIC_REV_5762)
6031 val |= tr32(MAC_TX_LENGTHS) &
6032 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6033 TX_LENGTHS_CNT_DWN_VAL_MSK);
6035 if (tp->link_config.active_speed == SPEED_1000 &&
6036 tp->link_config.active_duplex == DUPLEX_HALF)
6037 tw32(MAC_TX_LENGTHS, val |
6038 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6040 tw32(MAC_TX_LENGTHS, val |
6041 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6043 if (!tg3_flag(tp, 5705_PLUS)) {
6045 tw32(HOSTCC_STAT_COAL_TICKS,
6046 tp->coal.stats_block_coalesce_usecs);
6048 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6052 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6053 val = tr32(PCIE_PWR_MGMT_THRESH);
6055 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6058 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6059 tw32(PCIE_PWR_MGMT_THRESH, val);
6065 /* tp->lock must be held */
6066 static u64 tg3_refclk_read(struct tg3 *tp)
6068 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6069 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6072 /* tp->lock must be held */
6073 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6075 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
6076 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6077 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6078 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
6081 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6082 static inline void tg3_full_unlock(struct tg3 *tp);
6083 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6085 struct tg3 *tp = netdev_priv(dev);
6087 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6088 SOF_TIMESTAMPING_RX_SOFTWARE |
6089 SOF_TIMESTAMPING_SOFTWARE;
6091 if (tg3_flag(tp, PTP_CAPABLE)) {
6092 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6093 SOF_TIMESTAMPING_RX_HARDWARE |
6094 SOF_TIMESTAMPING_RAW_HARDWARE;
6098 info->phc_index = ptp_clock_index(tp->ptp_clock);
6100 info->phc_index = -1;
6102 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6104 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6105 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6106 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6107 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6111 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6113 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6114 bool neg_adj = false;
6122 /* Frequency adjustment is performed using hardware with a 24 bit
6123 * accumulator and a programmable correction value. On each clk, the
6124 * correction value gets added to the accumulator and when it
6125 * overflows, the time counter is incremented/decremented.
6127 * So conversion from ppb to correction value is
6128 * ppb * (1 << 24) / 1000000000
6130 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6131 TG3_EAV_REF_CLK_CORRECT_MASK;
6133 tg3_full_lock(tp, 0);
6136 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6137 TG3_EAV_REF_CLK_CORRECT_EN |
6138 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6140 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6142 tg3_full_unlock(tp);
6147 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6149 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6151 tg3_full_lock(tp, 0);
6152 tp->ptp_adjust += delta;
6153 tg3_full_unlock(tp);
6158 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6162 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6164 tg3_full_lock(tp, 0);
6165 ns = tg3_refclk_read(tp);
6166 ns += tp->ptp_adjust;
6167 tg3_full_unlock(tp);
6169 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6170 ts->tv_nsec = remainder;
6175 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6176 const struct timespec *ts)
6179 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6181 ns = timespec_to_ns(ts);
6183 tg3_full_lock(tp, 0);
6184 tg3_refclk_write(tp, ns);
6186 tg3_full_unlock(tp);
6191 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6192 struct ptp_clock_request *rq, int on)
6197 static const struct ptp_clock_info tg3_ptp_caps = {
6198 .owner = THIS_MODULE,
6199 .name = "tg3 clock",
6200 .max_adj = 250000000,
6205 .adjfreq = tg3_ptp_adjfreq,
6206 .adjtime = tg3_ptp_adjtime,
6207 .gettime = tg3_ptp_gettime,
6208 .settime = tg3_ptp_settime,
6209 .enable = tg3_ptp_enable,
6212 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6213 struct skb_shared_hwtstamps *timestamp)
6215 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6216 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6220 /* tp->lock must be held */
6221 static void tg3_ptp_init(struct tg3 *tp)
6223 if (!tg3_flag(tp, PTP_CAPABLE))
6226 /* Initialize the hardware clock to the system time. */
6227 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6229 tp->ptp_info = tg3_ptp_caps;
6232 /* tp->lock must be held */
6233 static void tg3_ptp_resume(struct tg3 *tp)
6235 if (!tg3_flag(tp, PTP_CAPABLE))
6238 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6242 static void tg3_ptp_fini(struct tg3 *tp)
6244 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6247 ptp_clock_unregister(tp->ptp_clock);
6248 tp->ptp_clock = NULL;
6252 static inline int tg3_irq_sync(struct tg3 *tp)
6254 return tp->irq_sync;
6257 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6261 dst = (u32 *)((u8 *)dst + off);
6262 for (i = 0; i < len; i += sizeof(u32))
6263 *dst++ = tr32(off + i);
6266 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6268 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6269 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6270 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6271 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6272 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6273 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6274 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6275 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6276 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6277 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6278 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6279 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6280 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6281 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6282 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6283 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6284 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6285 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6286 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6288 if (tg3_flag(tp, SUPPORT_MSIX))
6289 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6291 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6292 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6293 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6294 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6295 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6296 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6297 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6298 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6300 if (!tg3_flag(tp, 5705_PLUS)) {
6301 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6302 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6303 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6306 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6307 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6308 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6309 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6310 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6312 if (tg3_flag(tp, NVRAM))
6313 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6316 static void tg3_dump_state(struct tg3 *tp)
6321 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6325 if (tg3_flag(tp, PCI_EXPRESS)) {
6326 /* Read up to but not including private PCI registers */
6327 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6328 regs[i / sizeof(u32)] = tr32(i);
6330 tg3_dump_legacy_regs(tp, regs);
6332 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6333 if (!regs[i + 0] && !regs[i + 1] &&
6334 !regs[i + 2] && !regs[i + 3])
6337 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6339 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6344 for (i = 0; i < tp->irq_cnt; i++) {
6345 struct tg3_napi *tnapi = &tp->napi[i];
6347 /* SW status block */
6349 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6351 tnapi->hw_status->status,
6352 tnapi->hw_status->status_tag,
6353 tnapi->hw_status->rx_jumbo_consumer,
6354 tnapi->hw_status->rx_consumer,
6355 tnapi->hw_status->rx_mini_consumer,
6356 tnapi->hw_status->idx[0].rx_producer,
6357 tnapi->hw_status->idx[0].tx_consumer);
6360 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6362 tnapi->last_tag, tnapi->last_irq_tag,
6363 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6365 tnapi->prodring.rx_std_prod_idx,
6366 tnapi->prodring.rx_std_cons_idx,
6367 tnapi->prodring.rx_jmb_prod_idx,
6368 tnapi->prodring.rx_jmb_cons_idx);
6372 /* This is called whenever we suspect that the system chipset is re-
6373 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6374 * is bogus tx completions. We try to recover by setting the
6375 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6378 static void tg3_tx_recover(struct tg3 *tp)
6380 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6381 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6383 netdev_warn(tp->dev,
6384 "The system may be re-ordering memory-mapped I/O "
6385 "cycles to the network device, attempting to recover. "
6386 "Please report the problem to the driver maintainer "
6387 "and include system chipset information.\n");
6389 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6392 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6394 /* Tell compiler to fetch tx indices from memory. */
6396 return tnapi->tx_pending -
6397 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6400 /* Tigon3 never reports partial packet sends. So we do not
6401 * need special logic to handle SKBs that have not had all
6402 * of their frags sent yet, like SunGEM does.
6404 static void tg3_tx(struct tg3_napi *tnapi)
6406 struct tg3 *tp = tnapi->tp;
6407 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6408 u32 sw_idx = tnapi->tx_cons;
6409 struct netdev_queue *txq;
6410 int index = tnapi - tp->napi;
6411 unsigned int pkts_compl = 0, bytes_compl = 0;
6413 if (tg3_flag(tp, ENABLE_TSS))
6416 txq = netdev_get_tx_queue(tp->dev, index);
6418 while (sw_idx != hw_idx) {
6419 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6420 struct sk_buff *skb = ri->skb;
6423 if (unlikely(skb == NULL)) {
6428 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6429 struct skb_shared_hwtstamps timestamp;
6430 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6431 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6433 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp);
6435 skb_tstamp_tx(skb, ×tamp);
6438 pci_unmap_single(tp->pdev,
6439 dma_unmap_addr(ri, mapping),
6445 while (ri->fragmented) {
6446 ri->fragmented = false;
6447 sw_idx = NEXT_TX(sw_idx);
6448 ri = &tnapi->tx_buffers[sw_idx];
6451 sw_idx = NEXT_TX(sw_idx);
6453 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6454 ri = &tnapi->tx_buffers[sw_idx];
6455 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6458 pci_unmap_page(tp->pdev,
6459 dma_unmap_addr(ri, mapping),
6460 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6463 while (ri->fragmented) {
6464 ri->fragmented = false;
6465 sw_idx = NEXT_TX(sw_idx);
6466 ri = &tnapi->tx_buffers[sw_idx];
6469 sw_idx = NEXT_TX(sw_idx);
6473 bytes_compl += skb->len;
6477 if (unlikely(tx_bug)) {
6483 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6485 tnapi->tx_cons = sw_idx;
6487 /* Need to make the tx_cons update visible to tg3_start_xmit()
6488 * before checking for netif_queue_stopped(). Without the
6489 * memory barrier, there is a small possibility that tg3_start_xmit()
6490 * will miss it and cause the queue to be stopped forever.
6494 if (unlikely(netif_tx_queue_stopped(txq) &&
6495 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6496 __netif_tx_lock(txq, smp_processor_id());
6497 if (netif_tx_queue_stopped(txq) &&
6498 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6499 netif_tx_wake_queue(txq);
6500 __netif_tx_unlock(txq);
6504 static void tg3_frag_free(bool is_frag, void *data)
6507 put_page(virt_to_head_page(data));
6512 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6514 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6515 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6520 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6521 map_sz, PCI_DMA_FROMDEVICE);
6522 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6527 /* Returns size of skb allocated or < 0 on error.
6529 * We only need to fill in the address because the other members
6530 * of the RX descriptor are invariant, see tg3_init_rings.
6532 * Note the purposeful assymetry of cpu vs. chip accesses. For
6533 * posting buffers we only dirty the first cache line of the RX
6534 * descriptor (containing the address). Whereas for the RX status
6535 * buffers the cpu only reads the last cacheline of the RX descriptor
6536 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6538 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6539 u32 opaque_key, u32 dest_idx_unmasked,
6540 unsigned int *frag_size)
6542 struct tg3_rx_buffer_desc *desc;
6543 struct ring_info *map;
6546 int skb_size, data_size, dest_idx;
6548 switch (opaque_key) {
6549 case RXD_OPAQUE_RING_STD:
6550 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6551 desc = &tpr->rx_std[dest_idx];
6552 map = &tpr->rx_std_buffers[dest_idx];
6553 data_size = tp->rx_pkt_map_sz;
6556 case RXD_OPAQUE_RING_JUMBO:
6557 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6558 desc = &tpr->rx_jmb[dest_idx].std;
6559 map = &tpr->rx_jmb_buffers[dest_idx];
6560 data_size = TG3_RX_JMB_MAP_SZ;
6567 /* Do not overwrite any of the map or rp information
6568 * until we are sure we can commit to a new buffer.
6570 * Callers depend upon this behavior and assume that
6571 * we leave everything unchanged if we fail.
6573 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6574 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6575 if (skb_size <= PAGE_SIZE) {
6576 data = netdev_alloc_frag(skb_size);
6577 *frag_size = skb_size;
6579 data = kmalloc(skb_size, GFP_ATOMIC);
6585 mapping = pci_map_single(tp->pdev,
6586 data + TG3_RX_OFFSET(tp),
6588 PCI_DMA_FROMDEVICE);
6589 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6590 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6595 dma_unmap_addr_set(map, mapping, mapping);
6597 desc->addr_hi = ((u64)mapping >> 32);
6598 desc->addr_lo = ((u64)mapping & 0xffffffff);
6603 /* We only need to move over in the address because the other
6604 * members of the RX descriptor are invariant. See notes above
6605 * tg3_alloc_rx_data for full details.
6607 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6608 struct tg3_rx_prodring_set *dpr,
6609 u32 opaque_key, int src_idx,
6610 u32 dest_idx_unmasked)
6612 struct tg3 *tp = tnapi->tp;
6613 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6614 struct ring_info *src_map, *dest_map;
6615 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6618 switch (opaque_key) {
6619 case RXD_OPAQUE_RING_STD:
6620 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6621 dest_desc = &dpr->rx_std[dest_idx];
6622 dest_map = &dpr->rx_std_buffers[dest_idx];
6623 src_desc = &spr->rx_std[src_idx];
6624 src_map = &spr->rx_std_buffers[src_idx];
6627 case RXD_OPAQUE_RING_JUMBO:
6628 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6629 dest_desc = &dpr->rx_jmb[dest_idx].std;
6630 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6631 src_desc = &spr->rx_jmb[src_idx].std;
6632 src_map = &spr->rx_jmb_buffers[src_idx];
6639 dest_map->data = src_map->data;
6640 dma_unmap_addr_set(dest_map, mapping,
6641 dma_unmap_addr(src_map, mapping));
6642 dest_desc->addr_hi = src_desc->addr_hi;
6643 dest_desc->addr_lo = src_desc->addr_lo;
6645 /* Ensure that the update to the skb happens after the physical
6646 * addresses have been transferred to the new BD location.
6650 src_map->data = NULL;
6653 /* The RX ring scheme is composed of multiple rings which post fresh
6654 * buffers to the chip, and one special ring the chip uses to report
6655 * status back to the host.
6657 * The special ring reports the status of received packets to the
6658 * host. The chip does not write into the original descriptor the
6659 * RX buffer was obtained from. The chip simply takes the original
6660 * descriptor as provided by the host, updates the status and length
6661 * field, then writes this into the next status ring entry.
6663 * Each ring the host uses to post buffers to the chip is described
6664 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6665 * it is first placed into the on-chip ram. When the packet's length
6666 * is known, it walks down the TG3_BDINFO entries to select the ring.
6667 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6668 * which is within the range of the new packet's length is chosen.
6670 * The "separate ring for rx status" scheme may sound queer, but it makes
6671 * sense from a cache coherency perspective. If only the host writes
6672 * to the buffer post rings, and only the chip writes to the rx status
6673 * rings, then cache lines never move beyond shared-modified state.
6674 * If both the host and chip were to write into the same ring, cache line
6675 * eviction could occur since both entities want it in an exclusive state.
6677 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6679 struct tg3 *tp = tnapi->tp;
6680 u32 work_mask, rx_std_posted = 0;
6681 u32 std_prod_idx, jmb_prod_idx;
6682 u32 sw_idx = tnapi->rx_rcb_ptr;
6685 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6687 hw_idx = *(tnapi->rx_rcb_prod_idx);
6689 * We need to order the read of hw_idx and the read of
6690 * the opaque cookie.
6695 std_prod_idx = tpr->rx_std_prod_idx;
6696 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6697 while (sw_idx != hw_idx && budget > 0) {
6698 struct ring_info *ri;
6699 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6701 struct sk_buff *skb;
6702 dma_addr_t dma_addr;
6703 u32 opaque_key, desc_idx, *post_ptr;
6707 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6708 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6709 if (opaque_key == RXD_OPAQUE_RING_STD) {
6710 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6711 dma_addr = dma_unmap_addr(ri, mapping);
6713 post_ptr = &std_prod_idx;
6715 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6716 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6717 dma_addr = dma_unmap_addr(ri, mapping);
6719 post_ptr = &jmb_prod_idx;
6721 goto next_pkt_nopost;
6723 work_mask |= opaque_key;
6725 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6726 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6728 tg3_recycle_rx(tnapi, tpr, opaque_key,
6729 desc_idx, *post_ptr);
6731 /* Other statistics kept track of by card. */
6736 prefetch(data + TG3_RX_OFFSET(tp));
6737 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6740 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6741 RXD_FLAG_PTPSTAT_PTPV1 ||
6742 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6743 RXD_FLAG_PTPSTAT_PTPV2) {
6744 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6745 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6748 if (len > TG3_RX_COPY_THRESH(tp)) {
6750 unsigned int frag_size;
6752 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6753 *post_ptr, &frag_size);
6757 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6758 PCI_DMA_FROMDEVICE);
6760 skb = build_skb(data, frag_size);
6762 tg3_frag_free(frag_size != 0, data);
6763 goto drop_it_no_recycle;
6765 skb_reserve(skb, TG3_RX_OFFSET(tp));
6766 /* Ensure that the update to the data happens
6767 * after the usage of the old DMA mapping.
6774 tg3_recycle_rx(tnapi, tpr, opaque_key,
6775 desc_idx, *post_ptr);
6777 skb = netdev_alloc_skb(tp->dev,
6778 len + TG3_RAW_IP_ALIGN);
6780 goto drop_it_no_recycle;
6782 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6783 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6785 data + TG3_RX_OFFSET(tp),
6787 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6792 tg3_hwclock_to_timestamp(tp, tstamp,
6793 skb_hwtstamps(skb));
6795 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6796 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6797 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6798 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6799 skb->ip_summed = CHECKSUM_UNNECESSARY;
6801 skb_checksum_none_assert(skb);
6803 skb->protocol = eth_type_trans(skb, tp->dev);
6805 if (len > (tp->dev->mtu + ETH_HLEN) &&
6806 skb->protocol != htons(ETH_P_8021Q)) {
6808 goto drop_it_no_recycle;
6811 if (desc->type_flags & RXD_FLAG_VLAN &&
6812 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6813 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6814 desc->err_vlan & RXD_VLAN_MASK);
6816 napi_gro_receive(&tnapi->napi, skb);
6824 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6825 tpr->rx_std_prod_idx = std_prod_idx &
6826 tp->rx_std_ring_mask;
6827 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6828 tpr->rx_std_prod_idx);
6829 work_mask &= ~RXD_OPAQUE_RING_STD;
6834 sw_idx &= tp->rx_ret_ring_mask;
6836 /* Refresh hw_idx to see if there is new work */
6837 if (sw_idx == hw_idx) {
6838 hw_idx = *(tnapi->rx_rcb_prod_idx);
6843 /* ACK the status ring. */
6844 tnapi->rx_rcb_ptr = sw_idx;
6845 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6847 /* Refill RX ring(s). */
6848 if (!tg3_flag(tp, ENABLE_RSS)) {
6849 /* Sync BD data before updating mailbox */
6852 if (work_mask & RXD_OPAQUE_RING_STD) {
6853 tpr->rx_std_prod_idx = std_prod_idx &
6854 tp->rx_std_ring_mask;
6855 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6856 tpr->rx_std_prod_idx);
6858 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6859 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6860 tp->rx_jmb_ring_mask;
6861 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6862 tpr->rx_jmb_prod_idx);
6865 } else if (work_mask) {
6866 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6867 * updated before the producer indices can be updated.
6871 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6872 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6874 if (tnapi != &tp->napi[1]) {
6875 tp->rx_refill = true;
6876 napi_schedule(&tp->napi[1].napi);
6883 static void tg3_poll_link(struct tg3 *tp)
6885 /* handle link change and other phy events */
6886 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6887 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6889 if (sblk->status & SD_STATUS_LINK_CHG) {
6890 sblk->status = SD_STATUS_UPDATED |
6891 (sblk->status & ~SD_STATUS_LINK_CHG);
6892 spin_lock(&tp->lock);
6893 if (tg3_flag(tp, USE_PHYLIB)) {
6895 (MAC_STATUS_SYNC_CHANGED |
6896 MAC_STATUS_CFG_CHANGED |
6897 MAC_STATUS_MI_COMPLETION |
6898 MAC_STATUS_LNKSTATE_CHANGED));
6901 tg3_setup_phy(tp, false);
6902 spin_unlock(&tp->lock);
6907 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6908 struct tg3_rx_prodring_set *dpr,
6909 struct tg3_rx_prodring_set *spr)
6911 u32 si, di, cpycnt, src_prod_idx;
6915 src_prod_idx = spr->rx_std_prod_idx;
6917 /* Make sure updates to the rx_std_buffers[] entries and the
6918 * standard producer index are seen in the correct order.
6922 if (spr->rx_std_cons_idx == src_prod_idx)
6925 if (spr->rx_std_cons_idx < src_prod_idx)
6926 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6928 cpycnt = tp->rx_std_ring_mask + 1 -
6929 spr->rx_std_cons_idx;
6931 cpycnt = min(cpycnt,
6932 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6934 si = spr->rx_std_cons_idx;
6935 di = dpr->rx_std_prod_idx;
6937 for (i = di; i < di + cpycnt; i++) {
6938 if (dpr->rx_std_buffers[i].data) {
6948 /* Ensure that updates to the rx_std_buffers ring and the
6949 * shadowed hardware producer ring from tg3_recycle_skb() are
6950 * ordered correctly WRT the skb check above.
6954 memcpy(&dpr->rx_std_buffers[di],
6955 &spr->rx_std_buffers[si],
6956 cpycnt * sizeof(struct ring_info));
6958 for (i = 0; i < cpycnt; i++, di++, si++) {
6959 struct tg3_rx_buffer_desc *sbd, *dbd;
6960 sbd = &spr->rx_std[si];
6961 dbd = &dpr->rx_std[di];
6962 dbd->addr_hi = sbd->addr_hi;
6963 dbd->addr_lo = sbd->addr_lo;
6966 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6967 tp->rx_std_ring_mask;
6968 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6969 tp->rx_std_ring_mask;
6973 src_prod_idx = spr->rx_jmb_prod_idx;
6975 /* Make sure updates to the rx_jmb_buffers[] entries and
6976 * the jumbo producer index are seen in the correct order.
6980 if (spr->rx_jmb_cons_idx == src_prod_idx)
6983 if (spr->rx_jmb_cons_idx < src_prod_idx)
6984 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6986 cpycnt = tp->rx_jmb_ring_mask + 1 -
6987 spr->rx_jmb_cons_idx;
6989 cpycnt = min(cpycnt,
6990 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6992 si = spr->rx_jmb_cons_idx;
6993 di = dpr->rx_jmb_prod_idx;
6995 for (i = di; i < di + cpycnt; i++) {
6996 if (dpr->rx_jmb_buffers[i].data) {
7006 /* Ensure that updates to the rx_jmb_buffers ring and the
7007 * shadowed hardware producer ring from tg3_recycle_skb() are
7008 * ordered correctly WRT the skb check above.
7012 memcpy(&dpr->rx_jmb_buffers[di],
7013 &spr->rx_jmb_buffers[si],
7014 cpycnt * sizeof(struct ring_info));
7016 for (i = 0; i < cpycnt; i++, di++, si++) {
7017 struct tg3_rx_buffer_desc *sbd, *dbd;
7018 sbd = &spr->rx_jmb[si].std;
7019 dbd = &dpr->rx_jmb[di].std;
7020 dbd->addr_hi = sbd->addr_hi;
7021 dbd->addr_lo = sbd->addr_lo;
7024 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7025 tp->rx_jmb_ring_mask;
7026 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7027 tp->rx_jmb_ring_mask;
7033 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7035 struct tg3 *tp = tnapi->tp;
7037 /* run TX completion thread */
7038 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7040 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7044 if (!tnapi->rx_rcb_prod_idx)
7047 /* run RX thread, within the bounds set by NAPI.
7048 * All RX "locking" is done by ensuring outside
7049 * code synchronizes with tg3->napi.poll()
7051 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7052 work_done += tg3_rx(tnapi, budget - work_done);
7054 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7055 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7057 u32 std_prod_idx = dpr->rx_std_prod_idx;
7058 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7060 tp->rx_refill = false;
7061 for (i = 1; i <= tp->rxq_cnt; i++)
7062 err |= tg3_rx_prodring_xfer(tp, dpr,
7063 &tp->napi[i].prodring);
7067 if (std_prod_idx != dpr->rx_std_prod_idx)
7068 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7069 dpr->rx_std_prod_idx);
7071 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7072 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7073 dpr->rx_jmb_prod_idx);
7078 tw32_f(HOSTCC_MODE, tp->coal_now);
7084 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7086 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7087 schedule_work(&tp->reset_task);
7090 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7092 cancel_work_sync(&tp->reset_task);
7093 tg3_flag_clear(tp, RESET_TASK_PENDING);
7094 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7097 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7099 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7100 struct tg3 *tp = tnapi->tp;
7102 struct tg3_hw_status *sblk = tnapi->hw_status;
7105 work_done = tg3_poll_work(tnapi, work_done, budget);
7107 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7110 if (unlikely(work_done >= budget))
7113 /* tp->last_tag is used in tg3_int_reenable() below
7114 * to tell the hw how much work has been processed,
7115 * so we must read it before checking for more work.
7117 tnapi->last_tag = sblk->status_tag;
7118 tnapi->last_irq_tag = tnapi->last_tag;
7121 /* check for RX/TX work to do */
7122 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7123 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7125 /* This test here is not race free, but will reduce
7126 * the number of interrupts by looping again.
7128 if (tnapi == &tp->napi[1] && tp->rx_refill)
7131 napi_complete(napi);
7132 /* Reenable interrupts. */
7133 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7135 /* This test here is synchronized by napi_schedule()
7136 * and napi_complete() to close the race condition.
7138 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7139 tw32(HOSTCC_MODE, tp->coalesce_mode |
7140 HOSTCC_MODE_ENABLE |
7151 /* work_done is guaranteed to be less than budget. */
7152 napi_complete(napi);
7153 tg3_reset_task_schedule(tp);
7157 static void tg3_process_error(struct tg3 *tp)
7160 bool real_error = false;
7162 if (tg3_flag(tp, ERROR_PROCESSED))
7165 /* Check Flow Attention register */
7166 val = tr32(HOSTCC_FLOW_ATTN);
7167 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7168 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7172 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7173 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7177 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7178 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7187 tg3_flag_set(tp, ERROR_PROCESSED);
7188 tg3_reset_task_schedule(tp);
7191 static int tg3_poll(struct napi_struct *napi, int budget)
7193 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7194 struct tg3 *tp = tnapi->tp;
7196 struct tg3_hw_status *sblk = tnapi->hw_status;
7199 if (sblk->status & SD_STATUS_ERROR)
7200 tg3_process_error(tp);
7204 work_done = tg3_poll_work(tnapi, work_done, budget);
7206 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7209 if (unlikely(work_done >= budget))
7212 if (tg3_flag(tp, TAGGED_STATUS)) {
7213 /* tp->last_tag is used in tg3_int_reenable() below
7214 * to tell the hw how much work has been processed,
7215 * so we must read it before checking for more work.
7217 tnapi->last_tag = sblk->status_tag;
7218 tnapi->last_irq_tag = tnapi->last_tag;
7221 sblk->status &= ~SD_STATUS_UPDATED;
7223 if (likely(!tg3_has_work(tnapi))) {
7224 napi_complete(napi);
7225 tg3_int_reenable(tnapi);
7233 /* work_done is guaranteed to be less than budget. */
7234 napi_complete(napi);
7235 tg3_reset_task_schedule(tp);
7239 static void tg3_napi_disable(struct tg3 *tp)
7243 for (i = tp->irq_cnt - 1; i >= 0; i--)
7244 napi_disable(&tp->napi[i].napi);
7247 static void tg3_napi_enable(struct tg3 *tp)
7251 for (i = 0; i < tp->irq_cnt; i++)
7252 napi_enable(&tp->napi[i].napi);
7255 static void tg3_napi_init(struct tg3 *tp)
7259 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7260 for (i = 1; i < tp->irq_cnt; i++)
7261 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7264 static void tg3_napi_fini(struct tg3 *tp)
7268 for (i = 0; i < tp->irq_cnt; i++)
7269 netif_napi_del(&tp->napi[i].napi);
7272 static inline void tg3_netif_stop(struct tg3 *tp)
7274 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7275 tg3_napi_disable(tp);
7276 netif_carrier_off(tp->dev);
7277 netif_tx_disable(tp->dev);
7280 /* tp->lock must be held */
7281 static inline void tg3_netif_start(struct tg3 *tp)
7285 /* NOTE: unconditional netif_tx_wake_all_queues is only
7286 * appropriate so long as all callers are assured to
7287 * have free tx slots (such as after tg3_init_hw)
7289 netif_tx_wake_all_queues(tp->dev);
7292 netif_carrier_on(tp->dev);
7294 tg3_napi_enable(tp);
7295 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7296 tg3_enable_ints(tp);
7299 static void tg3_irq_quiesce(struct tg3 *tp)
7303 BUG_ON(tp->irq_sync);
7308 for (i = 0; i < tp->irq_cnt; i++)
7309 synchronize_irq(tp->napi[i].irq_vec);
7312 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7313 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7314 * with as well. Most of the time, this is not necessary except when
7315 * shutting down the device.
7317 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7319 spin_lock_bh(&tp->lock);
7321 tg3_irq_quiesce(tp);
7324 static inline void tg3_full_unlock(struct tg3 *tp)
7326 spin_unlock_bh(&tp->lock);
7329 /* One-shot MSI handler - Chip automatically disables interrupt
7330 * after sending MSI so driver doesn't have to do it.
7332 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7334 struct tg3_napi *tnapi = dev_id;
7335 struct tg3 *tp = tnapi->tp;
7337 prefetch(tnapi->hw_status);
7339 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7341 if (likely(!tg3_irq_sync(tp)))
7342 napi_schedule(&tnapi->napi);
7347 /* MSI ISR - No need to check for interrupt sharing and no need to
7348 * flush status block and interrupt mailbox. PCI ordering rules
7349 * guarantee that MSI will arrive after the status block.
7351 static irqreturn_t tg3_msi(int irq, void *dev_id)
7353 struct tg3_napi *tnapi = dev_id;
7354 struct tg3 *tp = tnapi->tp;
7356 prefetch(tnapi->hw_status);
7358 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7360 * Writing any value to intr-mbox-0 clears PCI INTA# and
7361 * chip-internal interrupt pending events.
7362 * Writing non-zero to intr-mbox-0 additional tells the
7363 * NIC to stop sending us irqs, engaging "in-intr-handler"
7366 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7367 if (likely(!tg3_irq_sync(tp)))
7368 napi_schedule(&tnapi->napi);
7370 return IRQ_RETVAL(1);
7373 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7375 struct tg3_napi *tnapi = dev_id;
7376 struct tg3 *tp = tnapi->tp;
7377 struct tg3_hw_status *sblk = tnapi->hw_status;
7378 unsigned int handled = 1;
7380 /* In INTx mode, it is possible for the interrupt to arrive at
7381 * the CPU before the status block posted prior to the interrupt.
7382 * Reading the PCI State register will confirm whether the
7383 * interrupt is ours and will flush the status block.
7385 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7386 if (tg3_flag(tp, CHIP_RESETTING) ||
7387 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7394 * Writing any value to intr-mbox-0 clears PCI INTA# and
7395 * chip-internal interrupt pending events.
7396 * Writing non-zero to intr-mbox-0 additional tells the
7397 * NIC to stop sending us irqs, engaging "in-intr-handler"
7400 * Flush the mailbox to de-assert the IRQ immediately to prevent
7401 * spurious interrupts. The flush impacts performance but
7402 * excessive spurious interrupts can be worse in some cases.
7404 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7405 if (tg3_irq_sync(tp))
7407 sblk->status &= ~SD_STATUS_UPDATED;
7408 if (likely(tg3_has_work(tnapi))) {
7409 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7410 napi_schedule(&tnapi->napi);
7412 /* No work, shared interrupt perhaps? re-enable
7413 * interrupts, and flush that PCI write
7415 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7419 return IRQ_RETVAL(handled);
7422 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7424 struct tg3_napi *tnapi = dev_id;
7425 struct tg3 *tp = tnapi->tp;
7426 struct tg3_hw_status *sblk = tnapi->hw_status;
7427 unsigned int handled = 1;
7429 /* In INTx mode, it is possible for the interrupt to arrive at
7430 * the CPU before the status block posted prior to the interrupt.
7431 * Reading the PCI State register will confirm whether the
7432 * interrupt is ours and will flush the status block.
7434 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7435 if (tg3_flag(tp, CHIP_RESETTING) ||
7436 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7443 * writing any value to intr-mbox-0 clears PCI INTA# and
7444 * chip-internal interrupt pending events.
7445 * writing non-zero to intr-mbox-0 additional tells the
7446 * NIC to stop sending us irqs, engaging "in-intr-handler"
7449 * Flush the mailbox to de-assert the IRQ immediately to prevent
7450 * spurious interrupts. The flush impacts performance but
7451 * excessive spurious interrupts can be worse in some cases.
7453 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7456 * In a shared interrupt configuration, sometimes other devices'
7457 * interrupts will scream. We record the current status tag here
7458 * so that the above check can report that the screaming interrupts
7459 * are unhandled. Eventually they will be silenced.
7461 tnapi->last_irq_tag = sblk->status_tag;
7463 if (tg3_irq_sync(tp))
7466 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7468 napi_schedule(&tnapi->napi);
7471 return IRQ_RETVAL(handled);
7474 /* ISR for interrupt test */
7475 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7477 struct tg3_napi *tnapi = dev_id;
7478 struct tg3 *tp = tnapi->tp;
7479 struct tg3_hw_status *sblk = tnapi->hw_status;
7481 if ((sblk->status & SD_STATUS_UPDATED) ||
7482 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7483 tg3_disable_ints(tp);
7484 return IRQ_RETVAL(1);
7486 return IRQ_RETVAL(0);
7489 #ifdef CONFIG_NET_POLL_CONTROLLER
7490 static void tg3_poll_controller(struct net_device *dev)
7493 struct tg3 *tp = netdev_priv(dev);
7495 if (tg3_irq_sync(tp))
7498 for (i = 0; i < tp->irq_cnt; i++)
7499 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7503 static void tg3_tx_timeout(struct net_device *dev)
7505 struct tg3 *tp = netdev_priv(dev);
7507 if (netif_msg_tx_err(tp)) {
7508 netdev_err(dev, "transmit timed out, resetting\n");
7512 tg3_reset_task_schedule(tp);
7515 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7516 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7518 u32 base = (u32) mapping & 0xffffffff;
7520 return (base > 0xffffdcc0) && (base + len + 8 < base);
7523 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7524 * of any 4GB boundaries: 4G, 8G, etc
7526 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7529 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7530 u32 base = (u32) mapping & 0xffffffff;
7532 return ((base + len + (mss & 0x3fff)) < base);
7537 /* Test for DMA addresses > 40-bit */
7538 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7541 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7542 if (tg3_flag(tp, 40BIT_DMA_BUG))
7543 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7550 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7551 dma_addr_t mapping, u32 len, u32 flags,
7554 txbd->addr_hi = ((u64) mapping >> 32);
7555 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7556 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7557 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7560 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7561 dma_addr_t map, u32 len, u32 flags,
7564 struct tg3 *tp = tnapi->tp;
7567 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7570 if (tg3_4g_overflow_test(map, len))
7573 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7576 if (tg3_40bit_overflow_test(tp, map, len))
7579 if (tp->dma_limit) {
7580 u32 prvidx = *entry;
7581 u32 tmp_flag = flags & ~TXD_FLAG_END;
7582 while (len > tp->dma_limit && *budget) {
7583 u32 frag_len = tp->dma_limit;
7584 len -= tp->dma_limit;
7586 /* Avoid the 8byte DMA problem */
7588 len += tp->dma_limit / 2;
7589 frag_len = tp->dma_limit / 2;
7592 tnapi->tx_buffers[*entry].fragmented = true;
7594 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7595 frag_len, tmp_flag, mss, vlan);
7598 *entry = NEXT_TX(*entry);
7605 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7606 len, flags, mss, vlan);
7608 *entry = NEXT_TX(*entry);
7611 tnapi->tx_buffers[prvidx].fragmented = false;
7615 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7616 len, flags, mss, vlan);
7617 *entry = NEXT_TX(*entry);
7623 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7626 struct sk_buff *skb;
7627 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7632 pci_unmap_single(tnapi->tp->pdev,
7633 dma_unmap_addr(txb, mapping),
7637 while (txb->fragmented) {
7638 txb->fragmented = false;
7639 entry = NEXT_TX(entry);
7640 txb = &tnapi->tx_buffers[entry];
7643 for (i = 0; i <= last; i++) {
7644 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7646 entry = NEXT_TX(entry);
7647 txb = &tnapi->tx_buffers[entry];
7649 pci_unmap_page(tnapi->tp->pdev,
7650 dma_unmap_addr(txb, mapping),
7651 skb_frag_size(frag), PCI_DMA_TODEVICE);
7653 while (txb->fragmented) {
7654 txb->fragmented = false;
7655 entry = NEXT_TX(entry);
7656 txb = &tnapi->tx_buffers[entry];
7661 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7662 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7663 struct sk_buff **pskb,
7664 u32 *entry, u32 *budget,
7665 u32 base_flags, u32 mss, u32 vlan)
7667 struct tg3 *tp = tnapi->tp;
7668 struct sk_buff *new_skb, *skb = *pskb;
7669 dma_addr_t new_addr = 0;
7672 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7673 new_skb = skb_copy(skb, GFP_ATOMIC);
7675 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7677 new_skb = skb_copy_expand(skb,
7678 skb_headroom(skb) + more_headroom,
7679 skb_tailroom(skb), GFP_ATOMIC);
7685 /* New SKB is guaranteed to be linear. */
7686 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7688 /* Make sure the mapping succeeded */
7689 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7690 dev_kfree_skb(new_skb);
7693 u32 save_entry = *entry;
7695 base_flags |= TXD_FLAG_END;
7697 tnapi->tx_buffers[*entry].skb = new_skb;
7698 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7701 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7702 new_skb->len, base_flags,
7704 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7705 dev_kfree_skb(new_skb);
7716 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7718 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7719 * TSO header is greater than 80 bytes.
7721 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7723 struct sk_buff *segs, *nskb;
7724 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7726 /* Estimate the number of fragments in the worst case */
7727 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7728 netif_stop_queue(tp->dev);
7730 /* netif_tx_stop_queue() must be done before checking
7731 * checking tx index in tg3_tx_avail() below, because in
7732 * tg3_tx(), we update tx index before checking for
7733 * netif_tx_queue_stopped().
7736 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7737 return NETDEV_TX_BUSY;
7739 netif_wake_queue(tp->dev);
7742 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7744 goto tg3_tso_bug_end;
7750 tg3_start_xmit(nskb, tp->dev);
7756 return NETDEV_TX_OK;
7759 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7760 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7762 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7764 struct tg3 *tp = netdev_priv(dev);
7765 u32 len, entry, base_flags, mss, vlan = 0;
7767 int i = -1, would_hit_hwbug;
7769 struct tg3_napi *tnapi;
7770 struct netdev_queue *txq;
7773 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7774 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7775 if (tg3_flag(tp, ENABLE_TSS))
7778 budget = tg3_tx_avail(tnapi);
7780 /* We are running in BH disabled context with netif_tx_lock
7781 * and TX reclaim runs via tp->napi.poll inside of a software
7782 * interrupt. Furthermore, IRQ processing runs lockless so we have
7783 * no IRQ context deadlocks to worry about either. Rejoice!
7785 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7786 if (!netif_tx_queue_stopped(txq)) {
7787 netif_tx_stop_queue(txq);
7789 /* This is a hard error, log it. */
7791 "BUG! Tx Ring full when queue awake!\n");
7793 return NETDEV_TX_BUSY;
7796 entry = tnapi->tx_prod;
7798 if (skb->ip_summed == CHECKSUM_PARTIAL)
7799 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7801 mss = skb_shinfo(skb)->gso_size;
7804 u32 tcp_opt_len, hdr_len;
7806 if (skb_header_cloned(skb) &&
7807 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7811 tcp_opt_len = tcp_optlen(skb);
7813 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7815 if (!skb_is_gso_v6(skb)) {
7817 iph->tot_len = htons(mss + hdr_len);
7820 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7821 tg3_flag(tp, TSO_BUG))
7822 return tg3_tso_bug(tp, skb);
7824 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7825 TXD_FLAG_CPU_POST_DMA);
7827 if (tg3_flag(tp, HW_TSO_1) ||
7828 tg3_flag(tp, HW_TSO_2) ||
7829 tg3_flag(tp, HW_TSO_3)) {
7830 tcp_hdr(skb)->check = 0;
7831 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7833 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7838 if (tg3_flag(tp, HW_TSO_3)) {
7839 mss |= (hdr_len & 0xc) << 12;
7841 base_flags |= 0x00000010;
7842 base_flags |= (hdr_len & 0x3e0) << 5;
7843 } else if (tg3_flag(tp, HW_TSO_2))
7844 mss |= hdr_len << 9;
7845 else if (tg3_flag(tp, HW_TSO_1) ||
7846 tg3_asic_rev(tp) == ASIC_REV_5705) {
7847 if (tcp_opt_len || iph->ihl > 5) {
7850 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7851 mss |= (tsflags << 11);
7854 if (tcp_opt_len || iph->ihl > 5) {
7857 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7858 base_flags |= tsflags << 12;
7863 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7864 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7865 base_flags |= TXD_FLAG_JMB_PKT;
7867 if (vlan_tx_tag_present(skb)) {
7868 base_flags |= TXD_FLAG_VLAN;
7869 vlan = vlan_tx_tag_get(skb);
7872 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7873 tg3_flag(tp, TX_TSTAMP_EN)) {
7874 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7875 base_flags |= TXD_FLAG_HWTSTAMP;
7878 len = skb_headlen(skb);
7880 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7881 if (pci_dma_mapping_error(tp->pdev, mapping))
7885 tnapi->tx_buffers[entry].skb = skb;
7886 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7888 would_hit_hwbug = 0;
7890 if (tg3_flag(tp, 5701_DMA_BUG))
7891 would_hit_hwbug = 1;
7893 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7894 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7896 would_hit_hwbug = 1;
7897 } else if (skb_shinfo(skb)->nr_frags > 0) {
7900 if (!tg3_flag(tp, HW_TSO_1) &&
7901 !tg3_flag(tp, HW_TSO_2) &&
7902 !tg3_flag(tp, HW_TSO_3))
7905 /* Now loop through additional data
7906 * fragments, and queue them.
7908 last = skb_shinfo(skb)->nr_frags - 1;
7909 for (i = 0; i <= last; i++) {
7910 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7912 len = skb_frag_size(frag);
7913 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7914 len, DMA_TO_DEVICE);
7916 tnapi->tx_buffers[entry].skb = NULL;
7917 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7919 if (dma_mapping_error(&tp->pdev->dev, mapping))
7923 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7925 ((i == last) ? TXD_FLAG_END : 0),
7927 would_hit_hwbug = 1;
7933 if (would_hit_hwbug) {
7934 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7936 /* If the workaround fails due to memory/mapping
7937 * failure, silently drop this packet.
7939 entry = tnapi->tx_prod;
7940 budget = tg3_tx_avail(tnapi);
7941 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7942 base_flags, mss, vlan))
7946 skb_tx_timestamp(skb);
7947 netdev_tx_sent_queue(txq, skb->len);
7949 /* Sync BD data before updating mailbox */
7952 /* Packets are ready, update Tx producer idx local and on card. */
7953 tw32_tx_mbox(tnapi->prodmbox, entry);
7955 tnapi->tx_prod = entry;
7956 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7957 netif_tx_stop_queue(txq);
7959 /* netif_tx_stop_queue() must be done before checking
7960 * checking tx index in tg3_tx_avail() below, because in
7961 * tg3_tx(), we update tx index before checking for
7962 * netif_tx_queue_stopped().
7965 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7966 netif_tx_wake_queue(txq);
7970 return NETDEV_TX_OK;
7973 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7974 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7979 return NETDEV_TX_OK;
7982 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7985 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7986 MAC_MODE_PORT_MODE_MASK);
7988 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7990 if (!tg3_flag(tp, 5705_PLUS))
7991 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7993 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7994 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7996 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7998 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8000 if (tg3_flag(tp, 5705_PLUS) ||
8001 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8002 tg3_asic_rev(tp) == ASIC_REV_5700)
8003 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8006 tw32(MAC_MODE, tp->mac_mode);
8010 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8012 u32 val, bmcr, mac_mode, ptest = 0;
8014 tg3_phy_toggle_apd(tp, false);
8015 tg3_phy_toggle_automdix(tp, false);
8017 if (extlpbk && tg3_phy_set_extloopbk(tp))
8020 bmcr = BMCR_FULLDPLX;
8025 bmcr |= BMCR_SPEED100;
8029 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8031 bmcr |= BMCR_SPEED100;
8034 bmcr |= BMCR_SPEED1000;
8039 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8040 tg3_readphy(tp, MII_CTRL1000, &val);
8041 val |= CTL1000_AS_MASTER |
8042 CTL1000_ENABLE_MASTER;
8043 tg3_writephy(tp, MII_CTRL1000, val);
8045 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8046 MII_TG3_FET_PTEST_TRIM_2;
8047 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8050 bmcr |= BMCR_LOOPBACK;
8052 tg3_writephy(tp, MII_BMCR, bmcr);
8054 /* The write needs to be flushed for the FETs */
8055 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8056 tg3_readphy(tp, MII_BMCR, &bmcr);
8060 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8061 tg3_asic_rev(tp) == ASIC_REV_5785) {
8062 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8063 MII_TG3_FET_PTEST_FRC_TX_LINK |
8064 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8066 /* The write needs to be flushed for the AC131 */
8067 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8070 /* Reset to prevent losing 1st rx packet intermittently */
8071 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8072 tg3_flag(tp, 5780_CLASS)) {
8073 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8075 tw32_f(MAC_RX_MODE, tp->rx_mode);
8078 mac_mode = tp->mac_mode &
8079 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8080 if (speed == SPEED_1000)
8081 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8083 mac_mode |= MAC_MODE_PORT_MODE_MII;
8085 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8086 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8088 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8089 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8090 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8091 mac_mode |= MAC_MODE_LINK_POLARITY;
8093 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8094 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8097 tw32(MAC_MODE, mac_mode);
8103 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8105 struct tg3 *tp = netdev_priv(dev);
8107 if (features & NETIF_F_LOOPBACK) {
8108 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8111 spin_lock_bh(&tp->lock);
8112 tg3_mac_loopback(tp, true);
8113 netif_carrier_on(tp->dev);
8114 spin_unlock_bh(&tp->lock);
8115 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8117 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8120 spin_lock_bh(&tp->lock);
8121 tg3_mac_loopback(tp, false);
8122 /* Force link status check */
8123 tg3_setup_phy(tp, true);
8124 spin_unlock_bh(&tp->lock);
8125 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8129 static netdev_features_t tg3_fix_features(struct net_device *dev,
8130 netdev_features_t features)
8132 struct tg3 *tp = netdev_priv(dev);
8134 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8135 features &= ~NETIF_F_ALL_TSO;
8140 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8142 netdev_features_t changed = dev->features ^ features;
8144 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8145 tg3_set_loopback(dev, features);
8150 static void tg3_rx_prodring_free(struct tg3 *tp,
8151 struct tg3_rx_prodring_set *tpr)
8155 if (tpr != &tp->napi[0].prodring) {
8156 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8157 i = (i + 1) & tp->rx_std_ring_mask)
8158 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8161 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8162 for (i = tpr->rx_jmb_cons_idx;
8163 i != tpr->rx_jmb_prod_idx;
8164 i = (i + 1) & tp->rx_jmb_ring_mask) {
8165 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8173 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8174 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8177 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8178 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8179 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8184 /* Initialize rx rings for packet processing.
8186 * The chip has been shut down and the driver detached from
8187 * the networking, so no interrupts or new tx packets will
8188 * end up in the driver. tp->{tx,}lock are held and thus
8191 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8192 struct tg3_rx_prodring_set *tpr)
8194 u32 i, rx_pkt_dma_sz;
8196 tpr->rx_std_cons_idx = 0;
8197 tpr->rx_std_prod_idx = 0;
8198 tpr->rx_jmb_cons_idx = 0;
8199 tpr->rx_jmb_prod_idx = 0;
8201 if (tpr != &tp->napi[0].prodring) {
8202 memset(&tpr->rx_std_buffers[0], 0,
8203 TG3_RX_STD_BUFF_RING_SIZE(tp));
8204 if (tpr->rx_jmb_buffers)
8205 memset(&tpr->rx_jmb_buffers[0], 0,
8206 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8210 /* Zero out all descriptors. */
8211 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8213 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8214 if (tg3_flag(tp, 5780_CLASS) &&
8215 tp->dev->mtu > ETH_DATA_LEN)
8216 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8217 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8219 /* Initialize invariants of the rings, we only set this
8220 * stuff once. This works because the card does not
8221 * write into the rx buffer posting rings.
8223 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8224 struct tg3_rx_buffer_desc *rxd;
8226 rxd = &tpr->rx_std[i];
8227 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8228 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8229 rxd->opaque = (RXD_OPAQUE_RING_STD |
8230 (i << RXD_OPAQUE_INDEX_SHIFT));
8233 /* Now allocate fresh SKBs for each rx ring. */
8234 for (i = 0; i < tp->rx_pending; i++) {
8235 unsigned int frag_size;
8237 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8239 netdev_warn(tp->dev,
8240 "Using a smaller RX standard ring. Only "
8241 "%d out of %d buffers were allocated "
8242 "successfully\n", i, tp->rx_pending);
8250 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8253 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8255 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8258 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8259 struct tg3_rx_buffer_desc *rxd;
8261 rxd = &tpr->rx_jmb[i].std;
8262 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8263 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8265 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8266 (i << RXD_OPAQUE_INDEX_SHIFT));
8269 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8270 unsigned int frag_size;
8272 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8274 netdev_warn(tp->dev,
8275 "Using a smaller RX jumbo ring. Only %d "
8276 "out of %d buffers were allocated "
8277 "successfully\n", i, tp->rx_jumbo_pending);
8280 tp->rx_jumbo_pending = i;
8289 tg3_rx_prodring_free(tp, tpr);
8293 static void tg3_rx_prodring_fini(struct tg3 *tp,
8294 struct tg3_rx_prodring_set *tpr)
8296 kfree(tpr->rx_std_buffers);
8297 tpr->rx_std_buffers = NULL;
8298 kfree(tpr->rx_jmb_buffers);
8299 tpr->rx_jmb_buffers = NULL;
8301 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8302 tpr->rx_std, tpr->rx_std_mapping);
8306 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8307 tpr->rx_jmb, tpr->rx_jmb_mapping);
8312 static int tg3_rx_prodring_init(struct tg3 *tp,
8313 struct tg3_rx_prodring_set *tpr)
8315 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8317 if (!tpr->rx_std_buffers)
8320 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8321 TG3_RX_STD_RING_BYTES(tp),
8322 &tpr->rx_std_mapping,
8327 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8328 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8330 if (!tpr->rx_jmb_buffers)
8333 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8334 TG3_RX_JMB_RING_BYTES(tp),
8335 &tpr->rx_jmb_mapping,
8344 tg3_rx_prodring_fini(tp, tpr);
8348 /* Free up pending packets in all rx/tx rings.
8350 * The chip has been shut down and the driver detached from
8351 * the networking, so no interrupts or new tx packets will
8352 * end up in the driver. tp->{tx,}lock is not held and we are not
8353 * in an interrupt context and thus may sleep.
8355 static void tg3_free_rings(struct tg3 *tp)
8359 for (j = 0; j < tp->irq_cnt; j++) {
8360 struct tg3_napi *tnapi = &tp->napi[j];
8362 tg3_rx_prodring_free(tp, &tnapi->prodring);
8364 if (!tnapi->tx_buffers)
8367 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8368 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8373 tg3_tx_skb_unmap(tnapi, i,
8374 skb_shinfo(skb)->nr_frags - 1);
8376 dev_kfree_skb_any(skb);
8378 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8382 /* Initialize tx/rx rings for packet processing.
8384 * The chip has been shut down and the driver detached from
8385 * the networking, so no interrupts or new tx packets will
8386 * end up in the driver. tp->{tx,}lock are held and thus
8389 static int tg3_init_rings(struct tg3 *tp)
8393 /* Free up all the SKBs. */
8396 for (i = 0; i < tp->irq_cnt; i++) {
8397 struct tg3_napi *tnapi = &tp->napi[i];
8399 tnapi->last_tag = 0;
8400 tnapi->last_irq_tag = 0;
8401 tnapi->hw_status->status = 0;
8402 tnapi->hw_status->status_tag = 0;
8403 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8408 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8410 tnapi->rx_rcb_ptr = 0;
8412 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8414 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8423 static void tg3_mem_tx_release(struct tg3 *tp)
8427 for (i = 0; i < tp->irq_max; i++) {
8428 struct tg3_napi *tnapi = &tp->napi[i];
8430 if (tnapi->tx_ring) {
8431 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8432 tnapi->tx_ring, tnapi->tx_desc_mapping);
8433 tnapi->tx_ring = NULL;
8436 kfree(tnapi->tx_buffers);
8437 tnapi->tx_buffers = NULL;
8441 static int tg3_mem_tx_acquire(struct tg3 *tp)
8444 struct tg3_napi *tnapi = &tp->napi[0];
8446 /* If multivector TSS is enabled, vector 0 does not handle
8447 * tx interrupts. Don't allocate any resources for it.
8449 if (tg3_flag(tp, ENABLE_TSS))
8452 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8453 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8454 TG3_TX_RING_SIZE, GFP_KERNEL);
8455 if (!tnapi->tx_buffers)
8458 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8460 &tnapi->tx_desc_mapping,
8462 if (!tnapi->tx_ring)
8469 tg3_mem_tx_release(tp);
8473 static void tg3_mem_rx_release(struct tg3 *tp)
8477 for (i = 0; i < tp->irq_max; i++) {
8478 struct tg3_napi *tnapi = &tp->napi[i];
8480 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8485 dma_free_coherent(&tp->pdev->dev,
8486 TG3_RX_RCB_RING_BYTES(tp),
8488 tnapi->rx_rcb_mapping);
8489 tnapi->rx_rcb = NULL;
8493 static int tg3_mem_rx_acquire(struct tg3 *tp)
8495 unsigned int i, limit;
8497 limit = tp->rxq_cnt;
8499 /* If RSS is enabled, we need a (dummy) producer ring
8500 * set on vector zero. This is the true hw prodring.
8502 if (tg3_flag(tp, ENABLE_RSS))
8505 for (i = 0; i < limit; i++) {
8506 struct tg3_napi *tnapi = &tp->napi[i];
8508 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8511 /* If multivector RSS is enabled, vector 0
8512 * does not handle rx or tx interrupts.
8513 * Don't allocate any resources for it.
8515 if (!i && tg3_flag(tp, ENABLE_RSS))
8518 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8519 TG3_RX_RCB_RING_BYTES(tp),
8520 &tnapi->rx_rcb_mapping,
8521 GFP_KERNEL | __GFP_ZERO);
8529 tg3_mem_rx_release(tp);
8534 * Must not be invoked with interrupt sources disabled and
8535 * the hardware shutdown down.
8537 static void tg3_free_consistent(struct tg3 *tp)
8541 for (i = 0; i < tp->irq_cnt; i++) {
8542 struct tg3_napi *tnapi = &tp->napi[i];
8544 if (tnapi->hw_status) {
8545 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8547 tnapi->status_mapping);
8548 tnapi->hw_status = NULL;
8552 tg3_mem_rx_release(tp);
8553 tg3_mem_tx_release(tp);
8556 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8557 tp->hw_stats, tp->stats_mapping);
8558 tp->hw_stats = NULL;
8563 * Must not be invoked with interrupt sources disabled and
8564 * the hardware shutdown down. Can sleep.
8566 static int tg3_alloc_consistent(struct tg3 *tp)
8570 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8571 sizeof(struct tg3_hw_stats),
8573 GFP_KERNEL | __GFP_ZERO);
8577 for (i = 0; i < tp->irq_cnt; i++) {
8578 struct tg3_napi *tnapi = &tp->napi[i];
8579 struct tg3_hw_status *sblk;
8581 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8583 &tnapi->status_mapping,
8584 GFP_KERNEL | __GFP_ZERO);
8585 if (!tnapi->hw_status)
8588 sblk = tnapi->hw_status;
8590 if (tg3_flag(tp, ENABLE_RSS)) {
8591 u16 *prodptr = NULL;
8594 * When RSS is enabled, the status block format changes
8595 * slightly. The "rx_jumbo_consumer", "reserved",
8596 * and "rx_mini_consumer" members get mapped to the
8597 * other three rx return ring producer indexes.
8601 prodptr = &sblk->idx[0].rx_producer;
8604 prodptr = &sblk->rx_jumbo_consumer;
8607 prodptr = &sblk->reserved;
8610 prodptr = &sblk->rx_mini_consumer;
8613 tnapi->rx_rcb_prod_idx = prodptr;
8615 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8619 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8625 tg3_free_consistent(tp);
8629 #define MAX_WAIT_CNT 1000
8631 /* To stop a block, clear the enable bit and poll till it
8632 * clears. tp->lock is held.
8634 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8639 if (tg3_flag(tp, 5705_PLUS)) {
8646 /* We can't enable/disable these bits of the
8647 * 5705/5750, just say success.
8660 for (i = 0; i < MAX_WAIT_CNT; i++) {
8663 if ((val & enable_bit) == 0)
8667 if (i == MAX_WAIT_CNT && !silent) {
8668 dev_err(&tp->pdev->dev,
8669 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8677 /* tp->lock is held. */
8678 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8682 tg3_disable_ints(tp);
8684 tp->rx_mode &= ~RX_MODE_ENABLE;
8685 tw32_f(MAC_RX_MODE, tp->rx_mode);
8688 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8689 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8690 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8691 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8692 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8693 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8695 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8696 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8697 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8698 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8699 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8700 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8701 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8703 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8704 tw32_f(MAC_MODE, tp->mac_mode);
8707 tp->tx_mode &= ~TX_MODE_ENABLE;
8708 tw32_f(MAC_TX_MODE, tp->tx_mode);
8710 for (i = 0; i < MAX_WAIT_CNT; i++) {
8712 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8715 if (i >= MAX_WAIT_CNT) {
8716 dev_err(&tp->pdev->dev,
8717 "%s timed out, TX_MODE_ENABLE will not clear "
8718 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8722 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8723 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8724 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8726 tw32(FTQ_RESET, 0xffffffff);
8727 tw32(FTQ_RESET, 0x00000000);
8729 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8730 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8732 for (i = 0; i < tp->irq_cnt; i++) {
8733 struct tg3_napi *tnapi = &tp->napi[i];
8734 if (tnapi->hw_status)
8735 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8741 /* Save PCI command register before chip reset */
8742 static void tg3_save_pci_state(struct tg3 *tp)
8744 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8747 /* Restore PCI state after chip reset */
8748 static void tg3_restore_pci_state(struct tg3 *tp)
8752 /* Re-enable indirect register accesses. */
8753 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8754 tp->misc_host_ctrl);
8756 /* Set MAX PCI retry to zero. */
8757 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8758 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8759 tg3_flag(tp, PCIX_MODE))
8760 val |= PCISTATE_RETRY_SAME_DMA;
8761 /* Allow reads and writes to the APE register and memory space. */
8762 if (tg3_flag(tp, ENABLE_APE))
8763 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8764 PCISTATE_ALLOW_APE_SHMEM_WR |
8765 PCISTATE_ALLOW_APE_PSPACE_WR;
8766 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8768 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8770 if (!tg3_flag(tp, PCI_EXPRESS)) {
8771 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8772 tp->pci_cacheline_sz);
8773 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8777 /* Make sure PCI-X relaxed ordering bit is clear. */
8778 if (tg3_flag(tp, PCIX_MODE)) {
8781 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8783 pcix_cmd &= ~PCI_X_CMD_ERO;
8784 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8788 if (tg3_flag(tp, 5780_CLASS)) {
8790 /* Chip reset on 5780 will reset MSI enable bit,
8791 * so need to restore it.
8793 if (tg3_flag(tp, USING_MSI)) {
8796 pci_read_config_word(tp->pdev,
8797 tp->msi_cap + PCI_MSI_FLAGS,
8799 pci_write_config_word(tp->pdev,
8800 tp->msi_cap + PCI_MSI_FLAGS,
8801 ctrl | PCI_MSI_FLAGS_ENABLE);
8802 val = tr32(MSGINT_MODE);
8803 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8808 /* tp->lock is held. */
8809 static int tg3_chip_reset(struct tg3 *tp)
8812 void (*write_op)(struct tg3 *, u32, u32);
8817 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8819 /* No matching tg3_nvram_unlock() after this because
8820 * chip reset below will undo the nvram lock.
8822 tp->nvram_lock_cnt = 0;
8824 /* GRC_MISC_CFG core clock reset will clear the memory
8825 * enable bit in PCI register 4 and the MSI enable bit
8826 * on some chips, so we save relevant registers here.
8828 tg3_save_pci_state(tp);
8830 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8831 tg3_flag(tp, 5755_PLUS))
8832 tw32(GRC_FASTBOOT_PC, 0);
8835 * We must avoid the readl() that normally takes place.
8836 * It locks machines, causes machine checks, and other
8837 * fun things. So, temporarily disable the 5701
8838 * hardware workaround, while we do the reset.
8840 write_op = tp->write32;
8841 if (write_op == tg3_write_flush_reg32)
8842 tp->write32 = tg3_write32;
8844 /* Prevent the irq handler from reading or writing PCI registers
8845 * during chip reset when the memory enable bit in the PCI command
8846 * register may be cleared. The chip does not generate interrupt
8847 * at this time, but the irq handler may still be called due to irq
8848 * sharing or irqpoll.
8850 tg3_flag_set(tp, CHIP_RESETTING);
8851 for (i = 0; i < tp->irq_cnt; i++) {
8852 struct tg3_napi *tnapi = &tp->napi[i];
8853 if (tnapi->hw_status) {
8854 tnapi->hw_status->status = 0;
8855 tnapi->hw_status->status_tag = 0;
8857 tnapi->last_tag = 0;
8858 tnapi->last_irq_tag = 0;
8862 for (i = 0; i < tp->irq_cnt; i++)
8863 synchronize_irq(tp->napi[i].irq_vec);
8865 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8866 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8867 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8871 val = GRC_MISC_CFG_CORECLK_RESET;
8873 if (tg3_flag(tp, PCI_EXPRESS)) {
8874 /* Force PCIe 1.0a mode */
8875 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8876 !tg3_flag(tp, 57765_PLUS) &&
8877 tr32(TG3_PCIE_PHY_TSTCTL) ==
8878 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8879 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8881 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8882 tw32(GRC_MISC_CFG, (1 << 29));
8887 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8888 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8889 tw32(GRC_VCPU_EXT_CTRL,
8890 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8893 /* Manage gphy power for all CPMU absent PCIe devices. */
8894 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8895 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8897 tw32(GRC_MISC_CFG, val);
8899 /* restore 5701 hardware bug workaround write method */
8900 tp->write32 = write_op;
8902 /* Unfortunately, we have to delay before the PCI read back.
8903 * Some 575X chips even will not respond to a PCI cfg access
8904 * when the reset command is given to the chip.
8906 * How do these hardware designers expect things to work
8907 * properly if the PCI write is posted for a long period
8908 * of time? It is always necessary to have some method by
8909 * which a register read back can occur to push the write
8910 * out which does the reset.
8912 * For most tg3 variants the trick below was working.
8917 /* Flush PCI posted writes. The normal MMIO registers
8918 * are inaccessible at this time so this is the only
8919 * way to make this reliably (actually, this is no longer
8920 * the case, see above). I tried to use indirect
8921 * register read/write but this upset some 5701 variants.
8923 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8927 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8930 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8934 /* Wait for link training to complete. */
8935 for (j = 0; j < 5000; j++)
8938 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8939 pci_write_config_dword(tp->pdev, 0xc4,
8940 cfg_val | (1 << 15));
8943 /* Clear the "no snoop" and "relaxed ordering" bits. */
8944 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8946 * Older PCIe devices only support the 128 byte
8947 * MPS setting. Enforce the restriction.
8949 if (!tg3_flag(tp, CPMU_PRESENT))
8950 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8951 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8953 /* Clear error status */
8954 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8955 PCI_EXP_DEVSTA_CED |
8956 PCI_EXP_DEVSTA_NFED |
8957 PCI_EXP_DEVSTA_FED |
8958 PCI_EXP_DEVSTA_URD);
8961 tg3_restore_pci_state(tp);
8963 tg3_flag_clear(tp, CHIP_RESETTING);
8964 tg3_flag_clear(tp, ERROR_PROCESSED);
8967 if (tg3_flag(tp, 5780_CLASS))
8968 val = tr32(MEMARB_MODE);
8969 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8971 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8973 tw32(0x5000, 0x400);
8976 if (tg3_flag(tp, IS_SSB_CORE)) {
8978 * BCM4785: In order to avoid repercussions from using
8979 * potentially defective internal ROM, stop the Rx RISC CPU,
8980 * which is not required.
8983 tg3_halt_cpu(tp, RX_CPU_BASE);
8986 err = tg3_poll_fw(tp);
8990 tw32(GRC_MODE, tp->grc_mode);
8992 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8995 tw32(0xc4, val | (1 << 15));
8998 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8999 tg3_asic_rev(tp) == ASIC_REV_5705) {
9000 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9001 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9002 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9003 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9006 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9007 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9009 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9010 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9015 tw32_f(MAC_MODE, val);
9018 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9022 if (tg3_flag(tp, PCI_EXPRESS) &&
9023 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9024 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9025 !tg3_flag(tp, 57765_PLUS)) {
9028 tw32(0x7c00, val | (1 << 25));
9031 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
9032 val = tr32(TG3_CPMU_CLCK_ORIDE);
9033 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9036 /* Reprobe ASF enable state. */
9037 tg3_flag_clear(tp, ENABLE_ASF);
9038 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9039 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9041 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9042 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9043 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9046 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9047 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9048 tg3_flag_set(tp, ENABLE_ASF);
9049 tp->last_event_jiffies = jiffies;
9050 if (tg3_flag(tp, 5750_PLUS))
9051 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9053 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9054 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9055 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9056 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9057 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9064 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9065 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9067 /* tp->lock is held. */
9068 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9074 tg3_write_sig_pre_reset(tp, kind);
9076 tg3_abort_hw(tp, silent);
9077 err = tg3_chip_reset(tp);
9079 __tg3_set_mac_addr(tp, false);
9081 tg3_write_sig_legacy(tp, kind);
9082 tg3_write_sig_post_reset(tp, kind);
9085 /* Save the stats across chip resets... */
9086 tg3_get_nstats(tp, &tp->net_stats_prev);
9087 tg3_get_estats(tp, &tp->estats_prev);
9089 /* And make sure the next sample is new data */
9090 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9099 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9101 struct tg3 *tp = netdev_priv(dev);
9102 struct sockaddr *addr = p;
9104 bool skip_mac_1 = false;
9106 if (!is_valid_ether_addr(addr->sa_data))
9107 return -EADDRNOTAVAIL;
9109 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9111 if (!netif_running(dev))
9114 if (tg3_flag(tp, ENABLE_ASF)) {
9115 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9117 addr0_high = tr32(MAC_ADDR_0_HIGH);
9118 addr0_low = tr32(MAC_ADDR_0_LOW);
9119 addr1_high = tr32(MAC_ADDR_1_HIGH);
9120 addr1_low = tr32(MAC_ADDR_1_LOW);
9122 /* Skip MAC addr 1 if ASF is using it. */
9123 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9124 !(addr1_high == 0 && addr1_low == 0))
9127 spin_lock_bh(&tp->lock);
9128 __tg3_set_mac_addr(tp, skip_mac_1);
9129 spin_unlock_bh(&tp->lock);
9134 /* tp->lock is held. */
9135 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9136 dma_addr_t mapping, u32 maxlen_flags,
9140 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9141 ((u64) mapping >> 32));
9143 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9144 ((u64) mapping & 0xffffffff));
9146 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9149 if (!tg3_flag(tp, 5705_PLUS))
9151 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9156 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9160 if (!tg3_flag(tp, ENABLE_TSS)) {
9161 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9162 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9163 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9165 tw32(HOSTCC_TXCOL_TICKS, 0);
9166 tw32(HOSTCC_TXMAX_FRAMES, 0);
9167 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9169 for (; i < tp->txq_cnt; i++) {
9172 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9173 tw32(reg, ec->tx_coalesce_usecs);
9174 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9175 tw32(reg, ec->tx_max_coalesced_frames);
9176 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9177 tw32(reg, ec->tx_max_coalesced_frames_irq);
9181 for (; i < tp->irq_max - 1; i++) {
9182 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9183 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9184 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9188 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9191 u32 limit = tp->rxq_cnt;
9193 if (!tg3_flag(tp, ENABLE_RSS)) {
9194 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9195 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9196 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9199 tw32(HOSTCC_RXCOL_TICKS, 0);
9200 tw32(HOSTCC_RXMAX_FRAMES, 0);
9201 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9204 for (; i < limit; i++) {
9207 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9208 tw32(reg, ec->rx_coalesce_usecs);
9209 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9210 tw32(reg, ec->rx_max_coalesced_frames);
9211 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9212 tw32(reg, ec->rx_max_coalesced_frames_irq);
9215 for (; i < tp->irq_max - 1; i++) {
9216 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9217 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9218 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9222 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9224 tg3_coal_tx_init(tp, ec);
9225 tg3_coal_rx_init(tp, ec);
9227 if (!tg3_flag(tp, 5705_PLUS)) {
9228 u32 val = ec->stats_block_coalesce_usecs;
9230 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9231 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9236 tw32(HOSTCC_STAT_COAL_TICKS, val);
9240 /* tp->lock is held. */
9241 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9245 /* Disable all transmit rings but the first. */
9246 if (!tg3_flag(tp, 5705_PLUS))
9247 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9248 else if (tg3_flag(tp, 5717_PLUS))
9249 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9250 else if (tg3_flag(tp, 57765_CLASS) ||
9251 tg3_asic_rev(tp) == ASIC_REV_5762)
9252 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9254 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9256 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9257 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9258 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9259 BDINFO_FLAGS_DISABLED);
9262 /* tp->lock is held. */
9263 static void tg3_tx_rcbs_init(struct tg3 *tp)
9266 u32 txrcb = NIC_SRAM_SEND_RCB;
9268 if (tg3_flag(tp, ENABLE_TSS))
9271 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9272 struct tg3_napi *tnapi = &tp->napi[i];
9274 if (!tnapi->tx_ring)
9277 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9278 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9279 NIC_SRAM_TX_BUFFER_DESC);
9283 /* tp->lock is held. */
9284 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9288 /* Disable all receive return rings but the first. */
9289 if (tg3_flag(tp, 5717_PLUS))
9290 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9291 else if (!tg3_flag(tp, 5705_PLUS))
9292 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9293 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9294 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9295 tg3_flag(tp, 57765_CLASS))
9296 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9298 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9300 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9301 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9302 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9303 BDINFO_FLAGS_DISABLED);
9306 /* tp->lock is held. */
9307 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9310 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9312 if (tg3_flag(tp, ENABLE_RSS))
9315 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9316 struct tg3_napi *tnapi = &tp->napi[i];
9321 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9322 (tp->rx_ret_ring_mask + 1) <<
9323 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9327 /* tp->lock is held. */
9328 static void tg3_rings_reset(struct tg3 *tp)
9332 struct tg3_napi *tnapi = &tp->napi[0];
9334 tg3_tx_rcbs_disable(tp);
9336 tg3_rx_ret_rcbs_disable(tp);
9338 /* Disable interrupts */
9339 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9340 tp->napi[0].chk_msi_cnt = 0;
9341 tp->napi[0].last_rx_cons = 0;
9342 tp->napi[0].last_tx_cons = 0;
9344 /* Zero mailbox registers. */
9345 if (tg3_flag(tp, SUPPORT_MSIX)) {
9346 for (i = 1; i < tp->irq_max; i++) {
9347 tp->napi[i].tx_prod = 0;
9348 tp->napi[i].tx_cons = 0;
9349 if (tg3_flag(tp, ENABLE_TSS))
9350 tw32_mailbox(tp->napi[i].prodmbox, 0);
9351 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9352 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9353 tp->napi[i].chk_msi_cnt = 0;
9354 tp->napi[i].last_rx_cons = 0;
9355 tp->napi[i].last_tx_cons = 0;
9357 if (!tg3_flag(tp, ENABLE_TSS))
9358 tw32_mailbox(tp->napi[0].prodmbox, 0);
9360 tp->napi[0].tx_prod = 0;
9361 tp->napi[0].tx_cons = 0;
9362 tw32_mailbox(tp->napi[0].prodmbox, 0);
9363 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9366 /* Make sure the NIC-based send BD rings are disabled. */
9367 if (!tg3_flag(tp, 5705_PLUS)) {
9368 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9369 for (i = 0; i < 16; i++)
9370 tw32_tx_mbox(mbox + i * 8, 0);
9373 /* Clear status block in ram. */
9374 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9376 /* Set status block DMA address */
9377 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9378 ((u64) tnapi->status_mapping >> 32));
9379 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9380 ((u64) tnapi->status_mapping & 0xffffffff));
9382 stblk = HOSTCC_STATBLCK_RING1;
9384 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9385 u64 mapping = (u64)tnapi->status_mapping;
9386 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9387 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9390 /* Clear status block in ram. */
9391 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9394 tg3_tx_rcbs_init(tp);
9395 tg3_rx_ret_rcbs_init(tp);
9398 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9400 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9402 if (!tg3_flag(tp, 5750_PLUS) ||
9403 tg3_flag(tp, 5780_CLASS) ||
9404 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9405 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9406 tg3_flag(tp, 57765_PLUS))
9407 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9408 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9409 tg3_asic_rev(tp) == ASIC_REV_5787)
9410 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9412 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9414 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9415 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9417 val = min(nic_rep_thresh, host_rep_thresh);
9418 tw32(RCVBDI_STD_THRESH, val);
9420 if (tg3_flag(tp, 57765_PLUS))
9421 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9423 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9426 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9428 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9430 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9431 tw32(RCVBDI_JUMBO_THRESH, val);
9433 if (tg3_flag(tp, 57765_PLUS))
9434 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9437 static inline u32 calc_crc(unsigned char *buf, int len)
9445 for (j = 0; j < len; j++) {
9448 for (k = 0; k < 8; k++) {
9461 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9463 /* accept or reject all multicast frames */
9464 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9465 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9466 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9467 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9470 static void __tg3_set_rx_mode(struct net_device *dev)
9472 struct tg3 *tp = netdev_priv(dev);
9475 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9476 RX_MODE_KEEP_VLAN_TAG);
9478 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9479 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9482 if (!tg3_flag(tp, ENABLE_ASF))
9483 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9486 if (dev->flags & IFF_PROMISC) {
9487 /* Promiscuous mode. */
9488 rx_mode |= RX_MODE_PROMISC;
9489 } else if (dev->flags & IFF_ALLMULTI) {
9490 /* Accept all multicast. */
9491 tg3_set_multi(tp, 1);
9492 } else if (netdev_mc_empty(dev)) {
9493 /* Reject all multicast. */
9494 tg3_set_multi(tp, 0);
9496 /* Accept one or more multicast(s). */
9497 struct netdev_hw_addr *ha;
9498 u32 mc_filter[4] = { 0, };
9503 netdev_for_each_mc_addr(ha, dev) {
9504 crc = calc_crc(ha->addr, ETH_ALEN);
9506 regidx = (bit & 0x60) >> 5;
9508 mc_filter[regidx] |= (1 << bit);
9511 tw32(MAC_HASH_REG_0, mc_filter[0]);
9512 tw32(MAC_HASH_REG_1, mc_filter[1]);
9513 tw32(MAC_HASH_REG_2, mc_filter[2]);
9514 tw32(MAC_HASH_REG_3, mc_filter[3]);
9517 if (rx_mode != tp->rx_mode) {
9518 tp->rx_mode = rx_mode;
9519 tw32_f(MAC_RX_MODE, rx_mode);
9524 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9528 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9529 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9532 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9536 if (!tg3_flag(tp, SUPPORT_MSIX))
9539 if (tp->rxq_cnt == 1) {
9540 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9544 /* Validate table against current IRQ count */
9545 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9546 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9550 if (i != TG3_RSS_INDIR_TBL_SIZE)
9551 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9554 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9557 u32 reg = MAC_RSS_INDIR_TBL_0;
9559 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9560 u32 val = tp->rss_ind_tbl[i];
9562 for (; i % 8; i++) {
9564 val |= tp->rss_ind_tbl[i];
9571 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9573 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9574 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9576 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9579 /* tp->lock is held. */
9580 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9582 u32 val, rdmac_mode;
9584 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9586 tg3_disable_ints(tp);
9590 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9592 if (tg3_flag(tp, INIT_COMPLETE))
9593 tg3_abort_hw(tp, 1);
9595 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9596 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9597 tg3_phy_pull_config(tp);
9598 tg3_eee_pull_config(tp, NULL);
9599 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9602 /* Enable MAC control of LPI */
9603 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9609 err = tg3_chip_reset(tp);
9613 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9615 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9616 val = tr32(TG3_CPMU_CTRL);
9617 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9618 tw32(TG3_CPMU_CTRL, val);
9620 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9621 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9622 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9623 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9625 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9626 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9627 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9628 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9630 val = tr32(TG3_CPMU_HST_ACC);
9631 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9632 val |= CPMU_HST_ACC_MACCLK_6_25;
9633 tw32(TG3_CPMU_HST_ACC, val);
9636 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9637 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9638 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9639 PCIE_PWR_MGMT_L1_THRESH_4MS;
9640 tw32(PCIE_PWR_MGMT_THRESH, val);
9642 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9643 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9645 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9647 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9648 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9651 if (tg3_flag(tp, L1PLLPD_EN)) {
9652 u32 grc_mode = tr32(GRC_MODE);
9654 /* Access the lower 1K of PL PCIE block registers. */
9655 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9656 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9658 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9659 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9660 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9662 tw32(GRC_MODE, grc_mode);
9665 if (tg3_flag(tp, 57765_CLASS)) {
9666 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9667 u32 grc_mode = tr32(GRC_MODE);
9669 /* Access the lower 1K of PL PCIE block registers. */
9670 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9671 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9673 val = tr32(TG3_PCIE_TLDLPL_PORT +
9674 TG3_PCIE_PL_LO_PHYCTL5);
9675 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9676 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9678 tw32(GRC_MODE, grc_mode);
9681 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9684 /* Fix transmit hangs */
9685 val = tr32(TG3_CPMU_PADRNG_CTL);
9686 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9687 tw32(TG3_CPMU_PADRNG_CTL, val);
9689 grc_mode = tr32(GRC_MODE);
9691 /* Access the lower 1K of DL PCIE block registers. */
9692 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9693 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9695 val = tr32(TG3_PCIE_TLDLPL_PORT +
9696 TG3_PCIE_DL_LO_FTSMAX);
9697 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9698 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9699 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9701 tw32(GRC_MODE, grc_mode);
9704 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9705 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9706 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9707 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9710 /* This works around an issue with Athlon chipsets on
9711 * B3 tigon3 silicon. This bit has no effect on any
9712 * other revision. But do not set this on PCI Express
9713 * chips and don't even touch the clocks if the CPMU is present.
9715 if (!tg3_flag(tp, CPMU_PRESENT)) {
9716 if (!tg3_flag(tp, PCI_EXPRESS))
9717 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9718 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9721 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9722 tg3_flag(tp, PCIX_MODE)) {
9723 val = tr32(TG3PCI_PCISTATE);
9724 val |= PCISTATE_RETRY_SAME_DMA;
9725 tw32(TG3PCI_PCISTATE, val);
9728 if (tg3_flag(tp, ENABLE_APE)) {
9729 /* Allow reads and writes to the
9730 * APE register and memory space.
9732 val = tr32(TG3PCI_PCISTATE);
9733 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9734 PCISTATE_ALLOW_APE_SHMEM_WR |
9735 PCISTATE_ALLOW_APE_PSPACE_WR;
9736 tw32(TG3PCI_PCISTATE, val);
9739 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9740 /* Enable some hw fixes. */
9741 val = tr32(TG3PCI_MSI_DATA);
9742 val |= (1 << 26) | (1 << 28) | (1 << 29);
9743 tw32(TG3PCI_MSI_DATA, val);
9746 /* Descriptor ring init may make accesses to the
9747 * NIC SRAM area to setup the TX descriptors, so we
9748 * can only do this after the hardware has been
9749 * successfully reset.
9751 err = tg3_init_rings(tp);
9755 if (tg3_flag(tp, 57765_PLUS)) {
9756 val = tr32(TG3PCI_DMA_RW_CTRL) &
9757 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9758 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9759 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9760 if (!tg3_flag(tp, 57765_CLASS) &&
9761 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9762 tg3_asic_rev(tp) != ASIC_REV_5762)
9763 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9764 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9765 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9766 tg3_asic_rev(tp) != ASIC_REV_5761) {
9767 /* This value is determined during the probe time DMA
9768 * engine test, tg3_test_dma.
9770 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9773 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9774 GRC_MODE_4X_NIC_SEND_RINGS |
9775 GRC_MODE_NO_TX_PHDR_CSUM |
9776 GRC_MODE_NO_RX_PHDR_CSUM);
9777 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9779 /* Pseudo-header checksum is done by hardware logic and not
9780 * the offload processers, so make the chip do the pseudo-
9781 * header checksums on receive. For transmit it is more
9782 * convenient to do the pseudo-header checksum in software
9783 * as Linux does that on transmit for us in all cases.
9785 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9787 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9789 tw32(TG3_RX_PTP_CTL,
9790 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9792 if (tg3_flag(tp, PTP_CAPABLE))
9793 val |= GRC_MODE_TIME_SYNC_ENABLE;
9795 tw32(GRC_MODE, tp->grc_mode | val);
9797 /* Setup the timer prescalar register. Clock is always 66Mhz. */
9798 val = tr32(GRC_MISC_CFG);
9800 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9801 tw32(GRC_MISC_CFG, val);
9803 /* Initialize MBUF/DESC pool. */
9804 if (tg3_flag(tp, 5750_PLUS)) {
9806 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9807 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9808 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9809 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9811 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9812 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9813 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9814 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9817 fw_len = tp->fw_len;
9818 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9819 tw32(BUFMGR_MB_POOL_ADDR,
9820 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9821 tw32(BUFMGR_MB_POOL_SIZE,
9822 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9825 if (tp->dev->mtu <= ETH_DATA_LEN) {
9826 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9827 tp->bufmgr_config.mbuf_read_dma_low_water);
9828 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9829 tp->bufmgr_config.mbuf_mac_rx_low_water);
9830 tw32(BUFMGR_MB_HIGH_WATER,
9831 tp->bufmgr_config.mbuf_high_water);
9833 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9834 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9835 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9836 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9837 tw32(BUFMGR_MB_HIGH_WATER,
9838 tp->bufmgr_config.mbuf_high_water_jumbo);
9840 tw32(BUFMGR_DMA_LOW_WATER,
9841 tp->bufmgr_config.dma_low_water);
9842 tw32(BUFMGR_DMA_HIGH_WATER,
9843 tp->bufmgr_config.dma_high_water);
9845 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9846 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9847 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9848 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9849 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9850 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9851 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9852 tw32(BUFMGR_MODE, val);
9853 for (i = 0; i < 2000; i++) {
9854 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9859 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9863 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9864 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9866 tg3_setup_rxbd_thresholds(tp);
9868 /* Initialize TG3_BDINFO's at:
9869 * RCVDBDI_STD_BD: standard eth size rx ring
9870 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9871 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9874 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9875 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9876 * ring attribute flags
9877 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9879 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9880 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9882 * The size of each ring is fixed in the firmware, but the location is
9885 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9886 ((u64) tpr->rx_std_mapping >> 32));
9887 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9888 ((u64) tpr->rx_std_mapping & 0xffffffff));
9889 if (!tg3_flag(tp, 5717_PLUS))
9890 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9891 NIC_SRAM_RX_BUFFER_DESC);
9893 /* Disable the mini ring */
9894 if (!tg3_flag(tp, 5705_PLUS))
9895 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9896 BDINFO_FLAGS_DISABLED);
9898 /* Program the jumbo buffer descriptor ring control
9899 * blocks on those devices that have them.
9901 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9902 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9904 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9905 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9906 ((u64) tpr->rx_jmb_mapping >> 32));
9907 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9908 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9909 val = TG3_RX_JMB_RING_SIZE(tp) <<
9910 BDINFO_FLAGS_MAXLEN_SHIFT;
9911 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9912 val | BDINFO_FLAGS_USE_EXT_RECV);
9913 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9914 tg3_flag(tp, 57765_CLASS) ||
9915 tg3_asic_rev(tp) == ASIC_REV_5762)
9916 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9917 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9919 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9920 BDINFO_FLAGS_DISABLED);
9923 if (tg3_flag(tp, 57765_PLUS)) {
9924 val = TG3_RX_STD_RING_SIZE(tp);
9925 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9926 val |= (TG3_RX_STD_DMA_SZ << 2);
9928 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9930 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9932 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9934 tpr->rx_std_prod_idx = tp->rx_pending;
9935 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9937 tpr->rx_jmb_prod_idx =
9938 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9939 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9941 tg3_rings_reset(tp);
9943 /* Initialize MAC address and backoff seed. */
9944 __tg3_set_mac_addr(tp, false);
9946 /* MTU + ethernet header + FCS + optional VLAN tag */
9947 tw32(MAC_RX_MTU_SIZE,
9948 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9950 /* The slot time is changed by tg3_setup_phy if we
9951 * run at gigabit with half duplex.
9953 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9954 (6 << TX_LENGTHS_IPG_SHIFT) |
9955 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9957 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9958 tg3_asic_rev(tp) == ASIC_REV_5762)
9959 val |= tr32(MAC_TX_LENGTHS) &
9960 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9961 TX_LENGTHS_CNT_DWN_VAL_MSK);
9963 tw32(MAC_TX_LENGTHS, val);
9965 /* Receive rules. */
9966 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9967 tw32(RCVLPC_CONFIG, 0x0181);
9969 /* Calculate RDMAC_MODE setting early, we need it to determine
9970 * the RCVLPC_STATE_ENABLE mask.
9972 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9973 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9974 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9975 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9976 RDMAC_MODE_LNGREAD_ENAB);
9978 if (tg3_asic_rev(tp) == ASIC_REV_5717)
9979 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9981 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9982 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9983 tg3_asic_rev(tp) == ASIC_REV_57780)
9984 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9985 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9986 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9988 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9989 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9990 if (tg3_flag(tp, TSO_CAPABLE) &&
9991 tg3_asic_rev(tp) == ASIC_REV_5705) {
9992 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9993 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9994 !tg3_flag(tp, IS_5788)) {
9995 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9999 if (tg3_flag(tp, PCI_EXPRESS))
10000 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10002 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10004 if (tp->dev->mtu <= ETH_DATA_LEN) {
10005 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10006 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10010 if (tg3_flag(tp, HW_TSO_1) ||
10011 tg3_flag(tp, HW_TSO_2) ||
10012 tg3_flag(tp, HW_TSO_3))
10013 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10015 if (tg3_flag(tp, 57765_PLUS) ||
10016 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10017 tg3_asic_rev(tp) == ASIC_REV_57780)
10018 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10020 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10021 tg3_asic_rev(tp) == ASIC_REV_5762)
10022 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10024 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10025 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10026 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10027 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10028 tg3_flag(tp, 57765_PLUS)) {
10031 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10032 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10034 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10036 val = tr32(tgtreg);
10037 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10038 tg3_asic_rev(tp) == ASIC_REV_5762) {
10039 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10040 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10041 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10042 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10043 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10044 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10046 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10049 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10050 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10051 tg3_asic_rev(tp) == ASIC_REV_5762) {
10054 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10055 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10057 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10059 val = tr32(tgtreg);
10061 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10062 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10065 /* Receive/send statistics. */
10066 if (tg3_flag(tp, 5750_PLUS)) {
10067 val = tr32(RCVLPC_STATS_ENABLE);
10068 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10069 tw32(RCVLPC_STATS_ENABLE, val);
10070 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10071 tg3_flag(tp, TSO_CAPABLE)) {
10072 val = tr32(RCVLPC_STATS_ENABLE);
10073 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10074 tw32(RCVLPC_STATS_ENABLE, val);
10076 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10078 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10079 tw32(SNDDATAI_STATSENAB, 0xffffff);
10080 tw32(SNDDATAI_STATSCTRL,
10081 (SNDDATAI_SCTRL_ENABLE |
10082 SNDDATAI_SCTRL_FASTUPD));
10084 /* Setup host coalescing engine. */
10085 tw32(HOSTCC_MODE, 0);
10086 for (i = 0; i < 2000; i++) {
10087 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10092 __tg3_set_coalesce(tp, &tp->coal);
10094 if (!tg3_flag(tp, 5705_PLUS)) {
10095 /* Status/statistics block address. See tg3_timer,
10096 * the tg3_periodic_fetch_stats call there, and
10097 * tg3_get_stats to see how this works for 5705/5750 chips.
10099 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10100 ((u64) tp->stats_mapping >> 32));
10101 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10102 ((u64) tp->stats_mapping & 0xffffffff));
10103 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10105 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10107 /* Clear statistics and status block memory areas */
10108 for (i = NIC_SRAM_STATS_BLK;
10109 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10110 i += sizeof(u32)) {
10111 tg3_write_mem(tp, i, 0);
10116 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10118 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10119 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10120 if (!tg3_flag(tp, 5705_PLUS))
10121 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10123 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10124 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10125 /* reset to prevent losing 1st rx packet intermittently */
10126 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10130 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10131 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10132 MAC_MODE_FHDE_ENABLE;
10133 if (tg3_flag(tp, ENABLE_APE))
10134 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10135 if (!tg3_flag(tp, 5705_PLUS) &&
10136 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10137 tg3_asic_rev(tp) != ASIC_REV_5700)
10138 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10139 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10142 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10143 * If TG3_FLAG_IS_NIC is zero, we should read the
10144 * register to preserve the GPIO settings for LOMs. The GPIOs,
10145 * whether used as inputs or outputs, are set by boot code after
10148 if (!tg3_flag(tp, IS_NIC)) {
10151 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10152 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10153 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10155 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10156 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10157 GRC_LCLCTRL_GPIO_OUTPUT3;
10159 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10160 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10162 tp->grc_local_ctrl &= ~gpio_mask;
10163 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10165 /* GPIO1 must be driven high for eeprom write protect */
10166 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10167 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10168 GRC_LCLCTRL_GPIO_OUTPUT1);
10170 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10173 if (tg3_flag(tp, USING_MSIX)) {
10174 val = tr32(MSGINT_MODE);
10175 val |= MSGINT_MODE_ENABLE;
10176 if (tp->irq_cnt > 1)
10177 val |= MSGINT_MODE_MULTIVEC_EN;
10178 if (!tg3_flag(tp, 1SHOT_MSI))
10179 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10180 tw32(MSGINT_MODE, val);
10183 if (!tg3_flag(tp, 5705_PLUS)) {
10184 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10188 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10189 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10190 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10191 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10192 WDMAC_MODE_LNGREAD_ENAB);
10194 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10195 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10196 if (tg3_flag(tp, TSO_CAPABLE) &&
10197 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10198 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10200 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10201 !tg3_flag(tp, IS_5788)) {
10202 val |= WDMAC_MODE_RX_ACCEL;
10206 /* Enable host coalescing bug fix */
10207 if (tg3_flag(tp, 5755_PLUS))
10208 val |= WDMAC_MODE_STATUS_TAG_FIX;
10210 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10211 val |= WDMAC_MODE_BURST_ALL_DATA;
10213 tw32_f(WDMAC_MODE, val);
10216 if (tg3_flag(tp, PCIX_MODE)) {
10219 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10221 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10222 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10223 pcix_cmd |= PCI_X_CMD_READ_2K;
10224 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10225 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10226 pcix_cmd |= PCI_X_CMD_READ_2K;
10228 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10232 tw32_f(RDMAC_MODE, rdmac_mode);
10235 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10236 tg3_asic_rev(tp) == ASIC_REV_5720) {
10237 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10238 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10241 if (i < TG3_NUM_RDMA_CHANNELS) {
10242 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10243 val |= tg3_lso_rd_dma_workaround_bit(tp);
10244 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10245 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10249 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10250 if (!tg3_flag(tp, 5705_PLUS))
10251 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10253 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10254 tw32(SNDDATAC_MODE,
10255 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10257 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10259 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10260 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10261 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10262 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10263 val |= RCVDBDI_MODE_LRG_RING_SZ;
10264 tw32(RCVDBDI_MODE, val);
10265 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10266 if (tg3_flag(tp, HW_TSO_1) ||
10267 tg3_flag(tp, HW_TSO_2) ||
10268 tg3_flag(tp, HW_TSO_3))
10269 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10270 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10271 if (tg3_flag(tp, ENABLE_TSS))
10272 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10273 tw32(SNDBDI_MODE, val);
10274 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10276 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10277 err = tg3_load_5701_a0_firmware_fix(tp);
10282 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10283 /* Ignore any errors for the firmware download. If download
10284 * fails, the device will operate with EEE disabled
10286 tg3_load_57766_firmware(tp);
10289 if (tg3_flag(tp, TSO_CAPABLE)) {
10290 err = tg3_load_tso_firmware(tp);
10295 tp->tx_mode = TX_MODE_ENABLE;
10297 if (tg3_flag(tp, 5755_PLUS) ||
10298 tg3_asic_rev(tp) == ASIC_REV_5906)
10299 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10301 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10302 tg3_asic_rev(tp) == ASIC_REV_5762) {
10303 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10304 tp->tx_mode &= ~val;
10305 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10308 tw32_f(MAC_TX_MODE, tp->tx_mode);
10311 if (tg3_flag(tp, ENABLE_RSS)) {
10312 tg3_rss_write_indir_tbl(tp);
10314 /* Setup the "secret" hash key. */
10315 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10316 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10317 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10318 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10319 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10320 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10321 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10322 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10323 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10324 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10327 tp->rx_mode = RX_MODE_ENABLE;
10328 if (tg3_flag(tp, 5755_PLUS))
10329 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10331 if (tg3_flag(tp, ENABLE_RSS))
10332 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10333 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10334 RX_MODE_RSS_IPV6_HASH_EN |
10335 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10336 RX_MODE_RSS_IPV4_HASH_EN |
10337 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10339 tw32_f(MAC_RX_MODE, tp->rx_mode);
10342 tw32(MAC_LED_CTRL, tp->led_ctrl);
10344 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10345 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10346 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10349 tw32_f(MAC_RX_MODE, tp->rx_mode);
10352 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10353 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10354 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10355 /* Set drive transmission level to 1.2V */
10356 /* only if the signal pre-emphasis bit is not set */
10357 val = tr32(MAC_SERDES_CFG);
10360 tw32(MAC_SERDES_CFG, val);
10362 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10363 tw32(MAC_SERDES_CFG, 0x616000);
10366 /* Prevent chip from dropping frames when flow control
10369 if (tg3_flag(tp, 57765_CLASS))
10373 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10375 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10376 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10377 /* Use hardware link auto-negotiation */
10378 tg3_flag_set(tp, HW_AUTONEG);
10381 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10382 tg3_asic_rev(tp) == ASIC_REV_5714) {
10385 tmp = tr32(SERDES_RX_CTRL);
10386 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10387 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10388 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10389 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10392 if (!tg3_flag(tp, USE_PHYLIB)) {
10393 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10394 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10396 err = tg3_setup_phy(tp, false);
10400 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10401 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10404 /* Clear CRC stats. */
10405 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10406 tg3_writephy(tp, MII_TG3_TEST1,
10407 tmp | MII_TG3_TEST1_CRC_EN);
10408 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10413 __tg3_set_rx_mode(tp->dev);
10415 /* Initialize receive rules. */
10416 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10417 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10418 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10419 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10421 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10425 if (tg3_flag(tp, ENABLE_ASF))
10429 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10431 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10433 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10435 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10437 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10439 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10441 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10443 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10445 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10447 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10449 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10451 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10453 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10455 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10463 if (tg3_flag(tp, ENABLE_APE))
10464 /* Write our heartbeat update interval to APE. */
10465 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10466 APE_HOST_HEARTBEAT_INT_DISABLE);
10468 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10473 /* Called at device open time to get the chip ready for
10474 * packet processing. Invoked with tp->lock held.
10476 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10478 tg3_switch_clocks(tp);
10480 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10482 return tg3_reset_hw(tp, reset_phy);
10485 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10489 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10490 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10492 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10495 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10496 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10497 memset(ocir, 0, TG3_OCIR_LEN);
10501 /* sysfs attributes for hwmon */
10502 static ssize_t tg3_show_temp(struct device *dev,
10503 struct device_attribute *devattr, char *buf)
10505 struct pci_dev *pdev = to_pci_dev(dev);
10506 struct net_device *netdev = pci_get_drvdata(pdev);
10507 struct tg3 *tp = netdev_priv(netdev);
10508 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10511 spin_lock_bh(&tp->lock);
10512 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10513 sizeof(temperature));
10514 spin_unlock_bh(&tp->lock);
10515 return sprintf(buf, "%u\n", temperature);
10519 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10520 TG3_TEMP_SENSOR_OFFSET);
10521 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10522 TG3_TEMP_CAUTION_OFFSET);
10523 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10524 TG3_TEMP_MAX_OFFSET);
10526 static struct attribute *tg3_attributes[] = {
10527 &sensor_dev_attr_temp1_input.dev_attr.attr,
10528 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10529 &sensor_dev_attr_temp1_max.dev_attr.attr,
10533 static const struct attribute_group tg3_group = {
10534 .attrs = tg3_attributes,
10537 static void tg3_hwmon_close(struct tg3 *tp)
10539 if (tp->hwmon_dev) {
10540 hwmon_device_unregister(tp->hwmon_dev);
10541 tp->hwmon_dev = NULL;
10542 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10546 static void tg3_hwmon_open(struct tg3 *tp)
10550 struct pci_dev *pdev = tp->pdev;
10551 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10553 tg3_sd_scan_scratchpad(tp, ocirs);
10555 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10556 if (!ocirs[i].src_data_length)
10559 size += ocirs[i].src_hdr_length;
10560 size += ocirs[i].src_data_length;
10566 /* Register hwmon sysfs hooks */
10567 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10569 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10573 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10574 if (IS_ERR(tp->hwmon_dev)) {
10575 tp->hwmon_dev = NULL;
10576 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10577 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10582 #define TG3_STAT_ADD32(PSTAT, REG) \
10583 do { u32 __val = tr32(REG); \
10584 (PSTAT)->low += __val; \
10585 if ((PSTAT)->low < __val) \
10586 (PSTAT)->high += 1; \
10589 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10591 struct tg3_hw_stats *sp = tp->hw_stats;
10596 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10597 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10598 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10599 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10600 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10601 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10602 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10603 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10604 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10605 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10606 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10607 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10608 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10609 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10610 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10611 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10614 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10615 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10616 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10617 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10620 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10621 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10622 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10623 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10624 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10625 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10626 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10627 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10628 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10629 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10630 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10631 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10632 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10633 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10635 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10636 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10637 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10638 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10639 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10641 u32 val = tr32(HOSTCC_FLOW_ATTN);
10642 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10644 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10645 sp->rx_discards.low += val;
10646 if (sp->rx_discards.low < val)
10647 sp->rx_discards.high += 1;
10649 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10651 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10654 static void tg3_chk_missed_msi(struct tg3 *tp)
10658 for (i = 0; i < tp->irq_cnt; i++) {
10659 struct tg3_napi *tnapi = &tp->napi[i];
10661 if (tg3_has_work(tnapi)) {
10662 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10663 tnapi->last_tx_cons == tnapi->tx_cons) {
10664 if (tnapi->chk_msi_cnt < 1) {
10665 tnapi->chk_msi_cnt++;
10671 tnapi->chk_msi_cnt = 0;
10672 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10673 tnapi->last_tx_cons = tnapi->tx_cons;
10677 static void tg3_timer(unsigned long __opaque)
10679 struct tg3 *tp = (struct tg3 *) __opaque;
10681 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10682 goto restart_timer;
10684 spin_lock(&tp->lock);
10686 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10687 tg3_flag(tp, 57765_CLASS))
10688 tg3_chk_missed_msi(tp);
10690 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10691 /* BCM4785: Flush posted writes from GbE to host memory. */
10695 if (!tg3_flag(tp, TAGGED_STATUS)) {
10696 /* All of this garbage is because when using non-tagged
10697 * IRQ status the mailbox/status_block protocol the chip
10698 * uses with the cpu is race prone.
10700 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10701 tw32(GRC_LOCAL_CTRL,
10702 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10704 tw32(HOSTCC_MODE, tp->coalesce_mode |
10705 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10708 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10709 spin_unlock(&tp->lock);
10710 tg3_reset_task_schedule(tp);
10711 goto restart_timer;
10715 /* This part only runs once per second. */
10716 if (!--tp->timer_counter) {
10717 if (tg3_flag(tp, 5705_PLUS))
10718 tg3_periodic_fetch_stats(tp);
10720 if (tp->setlpicnt && !--tp->setlpicnt)
10721 tg3_phy_eee_enable(tp);
10723 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10727 mac_stat = tr32(MAC_STATUS);
10730 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10731 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10733 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10737 tg3_setup_phy(tp, false);
10738 } else if (tg3_flag(tp, POLL_SERDES)) {
10739 u32 mac_stat = tr32(MAC_STATUS);
10740 int need_setup = 0;
10743 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10746 if (!tp->link_up &&
10747 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10748 MAC_STATUS_SIGNAL_DET))) {
10752 if (!tp->serdes_counter) {
10755 ~MAC_MODE_PORT_MODE_MASK));
10757 tw32_f(MAC_MODE, tp->mac_mode);
10760 tg3_setup_phy(tp, false);
10762 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10763 tg3_flag(tp, 5780_CLASS)) {
10764 tg3_serdes_parallel_detect(tp);
10767 tp->timer_counter = tp->timer_multiplier;
10770 /* Heartbeat is only sent once every 2 seconds.
10772 * The heartbeat is to tell the ASF firmware that the host
10773 * driver is still alive. In the event that the OS crashes,
10774 * ASF needs to reset the hardware to free up the FIFO space
10775 * that may be filled with rx packets destined for the host.
10776 * If the FIFO is full, ASF will no longer function properly.
10778 * Unintended resets have been reported on real time kernels
10779 * where the timer doesn't run on time. Netpoll will also have
10782 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10783 * to check the ring condition when the heartbeat is expiring
10784 * before doing the reset. This will prevent most unintended
10787 if (!--tp->asf_counter) {
10788 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10789 tg3_wait_for_event_ack(tp);
10791 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10792 FWCMD_NICDRV_ALIVE3);
10793 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10794 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10795 TG3_FW_UPDATE_TIMEOUT_SEC);
10797 tg3_generate_fw_event(tp);
10799 tp->asf_counter = tp->asf_multiplier;
10802 spin_unlock(&tp->lock);
10805 tp->timer.expires = jiffies + tp->timer_offset;
10806 add_timer(&tp->timer);
10809 static void tg3_timer_init(struct tg3 *tp)
10811 if (tg3_flag(tp, TAGGED_STATUS) &&
10812 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10813 !tg3_flag(tp, 57765_CLASS))
10814 tp->timer_offset = HZ;
10816 tp->timer_offset = HZ / 10;
10818 BUG_ON(tp->timer_offset > HZ);
10820 tp->timer_multiplier = (HZ / tp->timer_offset);
10821 tp->asf_multiplier = (HZ / tp->timer_offset) *
10822 TG3_FW_UPDATE_FREQ_SEC;
10824 init_timer(&tp->timer);
10825 tp->timer.data = (unsigned long) tp;
10826 tp->timer.function = tg3_timer;
10829 static void tg3_timer_start(struct tg3 *tp)
10831 tp->asf_counter = tp->asf_multiplier;
10832 tp->timer_counter = tp->timer_multiplier;
10834 tp->timer.expires = jiffies + tp->timer_offset;
10835 add_timer(&tp->timer);
10838 static void tg3_timer_stop(struct tg3 *tp)
10840 del_timer_sync(&tp->timer);
10843 /* Restart hardware after configuration changes, self-test, etc.
10844 * Invoked with tp->lock held.
10846 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10847 __releases(tp->lock)
10848 __acquires(tp->lock)
10852 err = tg3_init_hw(tp, reset_phy);
10854 netdev_err(tp->dev,
10855 "Failed to re-initialize device, aborting\n");
10856 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10857 tg3_full_unlock(tp);
10858 tg3_timer_stop(tp);
10860 tg3_napi_enable(tp);
10861 dev_close(tp->dev);
10862 tg3_full_lock(tp, 0);
10867 static void tg3_reset_task(struct work_struct *work)
10869 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10872 tg3_full_lock(tp, 0);
10874 if (!netif_running(tp->dev)) {
10875 tg3_flag_clear(tp, RESET_TASK_PENDING);
10876 tg3_full_unlock(tp);
10880 tg3_full_unlock(tp);
10884 tg3_netif_stop(tp);
10886 tg3_full_lock(tp, 1);
10888 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10889 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10890 tp->write32_rx_mbox = tg3_write_flush_reg32;
10891 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10892 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10895 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10896 err = tg3_init_hw(tp, true);
10900 tg3_netif_start(tp);
10903 tg3_full_unlock(tp);
10908 tg3_flag_clear(tp, RESET_TASK_PENDING);
10911 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10914 unsigned long flags;
10916 struct tg3_napi *tnapi = &tp->napi[irq_num];
10918 if (tp->irq_cnt == 1)
10919 name = tp->dev->name;
10921 name = &tnapi->irq_lbl[0];
10922 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10923 name[IFNAMSIZ-1] = 0;
10926 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10928 if (tg3_flag(tp, 1SHOT_MSI))
10929 fn = tg3_msi_1shot;
10932 fn = tg3_interrupt;
10933 if (tg3_flag(tp, TAGGED_STATUS))
10934 fn = tg3_interrupt_tagged;
10935 flags = IRQF_SHARED;
10938 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10941 static int tg3_test_interrupt(struct tg3 *tp)
10943 struct tg3_napi *tnapi = &tp->napi[0];
10944 struct net_device *dev = tp->dev;
10945 int err, i, intr_ok = 0;
10948 if (!netif_running(dev))
10951 tg3_disable_ints(tp);
10953 free_irq(tnapi->irq_vec, tnapi);
10956 * Turn off MSI one shot mode. Otherwise this test has no
10957 * observable way to know whether the interrupt was delivered.
10959 if (tg3_flag(tp, 57765_PLUS)) {
10960 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10961 tw32(MSGINT_MODE, val);
10964 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10965 IRQF_SHARED, dev->name, tnapi);
10969 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10970 tg3_enable_ints(tp);
10972 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10975 for (i = 0; i < 5; i++) {
10976 u32 int_mbox, misc_host_ctrl;
10978 int_mbox = tr32_mailbox(tnapi->int_mbox);
10979 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10981 if ((int_mbox != 0) ||
10982 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10987 if (tg3_flag(tp, 57765_PLUS) &&
10988 tnapi->hw_status->status_tag != tnapi->last_tag)
10989 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10994 tg3_disable_ints(tp);
10996 free_irq(tnapi->irq_vec, tnapi);
10998 err = tg3_request_irq(tp, 0);
11004 /* Reenable MSI one shot mode. */
11005 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11006 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11007 tw32(MSGINT_MODE, val);
11015 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11016 * successfully restored
11018 static int tg3_test_msi(struct tg3 *tp)
11023 if (!tg3_flag(tp, USING_MSI))
11026 /* Turn off SERR reporting in case MSI terminates with Master
11029 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11030 pci_write_config_word(tp->pdev, PCI_COMMAND,
11031 pci_cmd & ~PCI_COMMAND_SERR);
11033 err = tg3_test_interrupt(tp);
11035 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11040 /* other failures */
11044 /* MSI test failed, go back to INTx mode */
11045 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11046 "to INTx mode. Please report this failure to the PCI "
11047 "maintainer and include system chipset information\n");
11049 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11051 pci_disable_msi(tp->pdev);
11053 tg3_flag_clear(tp, USING_MSI);
11054 tp->napi[0].irq_vec = tp->pdev->irq;
11056 err = tg3_request_irq(tp, 0);
11060 /* Need to reset the chip because the MSI cycle may have terminated
11061 * with Master Abort.
11063 tg3_full_lock(tp, 1);
11065 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11066 err = tg3_init_hw(tp, true);
11068 tg3_full_unlock(tp);
11071 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11076 static int tg3_request_firmware(struct tg3 *tp)
11078 const struct tg3_firmware_hdr *fw_hdr;
11080 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11081 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11086 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11088 /* Firmware blob starts with version numbers, followed by
11089 * start address and _full_ length including BSS sections
11090 * (which must be longer than the actual data, of course
11093 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11094 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11095 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11096 tp->fw_len, tp->fw_needed);
11097 release_firmware(tp->fw);
11102 /* We no longer need firmware; we have it. */
11103 tp->fw_needed = NULL;
11107 static u32 tg3_irq_count(struct tg3 *tp)
11109 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11112 /* We want as many rx rings enabled as there are cpus.
11113 * In multiqueue MSI-X mode, the first MSI-X vector
11114 * only deals with link interrupts, etc, so we add
11115 * one to the number of vectors we are requesting.
11117 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11123 static bool tg3_enable_msix(struct tg3 *tp)
11126 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11128 tp->txq_cnt = tp->txq_req;
11129 tp->rxq_cnt = tp->rxq_req;
11131 tp->rxq_cnt = netif_get_num_default_rss_queues();
11132 if (tp->rxq_cnt > tp->rxq_max)
11133 tp->rxq_cnt = tp->rxq_max;
11135 /* Disable multiple TX rings by default. Simple round-robin hardware
11136 * scheduling of the TX rings can cause starvation of rings with
11137 * small packets when other rings have TSO or jumbo packets.
11142 tp->irq_cnt = tg3_irq_count(tp);
11144 for (i = 0; i < tp->irq_max; i++) {
11145 msix_ent[i].entry = i;
11146 msix_ent[i].vector = 0;
11149 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11152 } else if (rc != 0) {
11153 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11155 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11158 tp->rxq_cnt = max(rc - 1, 1);
11160 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11163 for (i = 0; i < tp->irq_max; i++)
11164 tp->napi[i].irq_vec = msix_ent[i].vector;
11166 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11167 pci_disable_msix(tp->pdev);
11171 if (tp->irq_cnt == 1)
11174 tg3_flag_set(tp, ENABLE_RSS);
11176 if (tp->txq_cnt > 1)
11177 tg3_flag_set(tp, ENABLE_TSS);
11179 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11184 static void tg3_ints_init(struct tg3 *tp)
11186 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11187 !tg3_flag(tp, TAGGED_STATUS)) {
11188 /* All MSI supporting chips should support tagged
11189 * status. Assert that this is the case.
11191 netdev_warn(tp->dev,
11192 "MSI without TAGGED_STATUS? Not using MSI\n");
11196 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11197 tg3_flag_set(tp, USING_MSIX);
11198 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11199 tg3_flag_set(tp, USING_MSI);
11201 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11202 u32 msi_mode = tr32(MSGINT_MODE);
11203 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11204 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11205 if (!tg3_flag(tp, 1SHOT_MSI))
11206 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11207 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11210 if (!tg3_flag(tp, USING_MSIX)) {
11212 tp->napi[0].irq_vec = tp->pdev->irq;
11215 if (tp->irq_cnt == 1) {
11218 netif_set_real_num_tx_queues(tp->dev, 1);
11219 netif_set_real_num_rx_queues(tp->dev, 1);
11223 static void tg3_ints_fini(struct tg3 *tp)
11225 if (tg3_flag(tp, USING_MSIX))
11226 pci_disable_msix(tp->pdev);
11227 else if (tg3_flag(tp, USING_MSI))
11228 pci_disable_msi(tp->pdev);
11229 tg3_flag_clear(tp, USING_MSI);
11230 tg3_flag_clear(tp, USING_MSIX);
11231 tg3_flag_clear(tp, ENABLE_RSS);
11232 tg3_flag_clear(tp, ENABLE_TSS);
11235 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11238 struct net_device *dev = tp->dev;
11242 * Setup interrupts first so we know how
11243 * many NAPI resources to allocate
11247 tg3_rss_check_indir_tbl(tp);
11249 /* The placement of this call is tied
11250 * to the setup and use of Host TX descriptors.
11252 err = tg3_alloc_consistent(tp);
11254 goto out_ints_fini;
11258 tg3_napi_enable(tp);
11260 for (i = 0; i < tp->irq_cnt; i++) {
11261 struct tg3_napi *tnapi = &tp->napi[i];
11262 err = tg3_request_irq(tp, i);
11264 for (i--; i >= 0; i--) {
11265 tnapi = &tp->napi[i];
11266 free_irq(tnapi->irq_vec, tnapi);
11268 goto out_napi_fini;
11272 tg3_full_lock(tp, 0);
11275 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11277 err = tg3_init_hw(tp, reset_phy);
11279 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11280 tg3_free_rings(tp);
11283 tg3_full_unlock(tp);
11288 if (test_irq && tg3_flag(tp, USING_MSI)) {
11289 err = tg3_test_msi(tp);
11292 tg3_full_lock(tp, 0);
11293 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11294 tg3_free_rings(tp);
11295 tg3_full_unlock(tp);
11297 goto out_napi_fini;
11300 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11301 u32 val = tr32(PCIE_TRANSACTION_CFG);
11303 tw32(PCIE_TRANSACTION_CFG,
11304 val | PCIE_TRANS_CFG_1SHOT_MSI);
11310 tg3_hwmon_open(tp);
11312 tg3_full_lock(tp, 0);
11314 tg3_timer_start(tp);
11315 tg3_flag_set(tp, INIT_COMPLETE);
11316 tg3_enable_ints(tp);
11321 tg3_ptp_resume(tp);
11324 tg3_full_unlock(tp);
11326 netif_tx_start_all_queues(dev);
11329 * Reset loopback feature if it was turned on while the device was down
11330 * make sure that it's installed properly now.
11332 if (dev->features & NETIF_F_LOOPBACK)
11333 tg3_set_loopback(dev, dev->features);
11338 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11339 struct tg3_napi *tnapi = &tp->napi[i];
11340 free_irq(tnapi->irq_vec, tnapi);
11344 tg3_napi_disable(tp);
11346 tg3_free_consistent(tp);
11354 static void tg3_stop(struct tg3 *tp)
11358 tg3_reset_task_cancel(tp);
11359 tg3_netif_stop(tp);
11361 tg3_timer_stop(tp);
11363 tg3_hwmon_close(tp);
11367 tg3_full_lock(tp, 1);
11369 tg3_disable_ints(tp);
11371 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11372 tg3_free_rings(tp);
11373 tg3_flag_clear(tp, INIT_COMPLETE);
11375 tg3_full_unlock(tp);
11377 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11378 struct tg3_napi *tnapi = &tp->napi[i];
11379 free_irq(tnapi->irq_vec, tnapi);
11386 tg3_free_consistent(tp);
11389 static int tg3_open(struct net_device *dev)
11391 struct tg3 *tp = netdev_priv(dev);
11394 if (tp->fw_needed) {
11395 err = tg3_request_firmware(tp);
11396 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11398 netdev_warn(tp->dev, "EEE capability disabled\n");
11399 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11400 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11401 netdev_warn(tp->dev, "EEE capability restored\n");
11402 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11404 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11408 netdev_warn(tp->dev, "TSO capability disabled\n");
11409 tg3_flag_clear(tp, TSO_CAPABLE);
11410 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11411 netdev_notice(tp->dev, "TSO capability restored\n");
11412 tg3_flag_set(tp, TSO_CAPABLE);
11416 tg3_carrier_off(tp);
11418 err = tg3_power_up(tp);
11422 tg3_full_lock(tp, 0);
11424 tg3_disable_ints(tp);
11425 tg3_flag_clear(tp, INIT_COMPLETE);
11427 tg3_full_unlock(tp);
11429 err = tg3_start(tp,
11430 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11433 tg3_frob_aux_power(tp, false);
11434 pci_set_power_state(tp->pdev, PCI_D3hot);
11437 if (tg3_flag(tp, PTP_CAPABLE)) {
11438 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11440 if (IS_ERR(tp->ptp_clock))
11441 tp->ptp_clock = NULL;
11447 static int tg3_close(struct net_device *dev)
11449 struct tg3 *tp = netdev_priv(dev);
11455 /* Clear stats across close / open calls */
11456 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11457 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11459 tg3_power_down(tp);
11461 tg3_carrier_off(tp);
11466 static inline u64 get_stat64(tg3_stat64_t *val)
11468 return ((u64)val->high << 32) | ((u64)val->low);
11471 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11473 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11475 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11476 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11477 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11480 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11481 tg3_writephy(tp, MII_TG3_TEST1,
11482 val | MII_TG3_TEST1_CRC_EN);
11483 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11487 tp->phy_crc_errors += val;
11489 return tp->phy_crc_errors;
11492 return get_stat64(&hw_stats->rx_fcs_errors);
11495 #define ESTAT_ADD(member) \
11496 estats->member = old_estats->member + \
11497 get_stat64(&hw_stats->member)
11499 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11501 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11502 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11504 ESTAT_ADD(rx_octets);
11505 ESTAT_ADD(rx_fragments);
11506 ESTAT_ADD(rx_ucast_packets);
11507 ESTAT_ADD(rx_mcast_packets);
11508 ESTAT_ADD(rx_bcast_packets);
11509 ESTAT_ADD(rx_fcs_errors);
11510 ESTAT_ADD(rx_align_errors);
11511 ESTAT_ADD(rx_xon_pause_rcvd);
11512 ESTAT_ADD(rx_xoff_pause_rcvd);
11513 ESTAT_ADD(rx_mac_ctrl_rcvd);
11514 ESTAT_ADD(rx_xoff_entered);
11515 ESTAT_ADD(rx_frame_too_long_errors);
11516 ESTAT_ADD(rx_jabbers);
11517 ESTAT_ADD(rx_undersize_packets);
11518 ESTAT_ADD(rx_in_length_errors);
11519 ESTAT_ADD(rx_out_length_errors);
11520 ESTAT_ADD(rx_64_or_less_octet_packets);
11521 ESTAT_ADD(rx_65_to_127_octet_packets);
11522 ESTAT_ADD(rx_128_to_255_octet_packets);
11523 ESTAT_ADD(rx_256_to_511_octet_packets);
11524 ESTAT_ADD(rx_512_to_1023_octet_packets);
11525 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11526 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11527 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11528 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11529 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11531 ESTAT_ADD(tx_octets);
11532 ESTAT_ADD(tx_collisions);
11533 ESTAT_ADD(tx_xon_sent);
11534 ESTAT_ADD(tx_xoff_sent);
11535 ESTAT_ADD(tx_flow_control);
11536 ESTAT_ADD(tx_mac_errors);
11537 ESTAT_ADD(tx_single_collisions);
11538 ESTAT_ADD(tx_mult_collisions);
11539 ESTAT_ADD(tx_deferred);
11540 ESTAT_ADD(tx_excessive_collisions);
11541 ESTAT_ADD(tx_late_collisions);
11542 ESTAT_ADD(tx_collide_2times);
11543 ESTAT_ADD(tx_collide_3times);
11544 ESTAT_ADD(tx_collide_4times);
11545 ESTAT_ADD(tx_collide_5times);
11546 ESTAT_ADD(tx_collide_6times);
11547 ESTAT_ADD(tx_collide_7times);
11548 ESTAT_ADD(tx_collide_8times);
11549 ESTAT_ADD(tx_collide_9times);
11550 ESTAT_ADD(tx_collide_10times);
11551 ESTAT_ADD(tx_collide_11times);
11552 ESTAT_ADD(tx_collide_12times);
11553 ESTAT_ADD(tx_collide_13times);
11554 ESTAT_ADD(tx_collide_14times);
11555 ESTAT_ADD(tx_collide_15times);
11556 ESTAT_ADD(tx_ucast_packets);
11557 ESTAT_ADD(tx_mcast_packets);
11558 ESTAT_ADD(tx_bcast_packets);
11559 ESTAT_ADD(tx_carrier_sense_errors);
11560 ESTAT_ADD(tx_discards);
11561 ESTAT_ADD(tx_errors);
11563 ESTAT_ADD(dma_writeq_full);
11564 ESTAT_ADD(dma_write_prioq_full);
11565 ESTAT_ADD(rxbds_empty);
11566 ESTAT_ADD(rx_discards);
11567 ESTAT_ADD(rx_errors);
11568 ESTAT_ADD(rx_threshold_hit);
11570 ESTAT_ADD(dma_readq_full);
11571 ESTAT_ADD(dma_read_prioq_full);
11572 ESTAT_ADD(tx_comp_queue_full);
11574 ESTAT_ADD(ring_set_send_prod_index);
11575 ESTAT_ADD(ring_status_update);
11576 ESTAT_ADD(nic_irqs);
11577 ESTAT_ADD(nic_avoided_irqs);
11578 ESTAT_ADD(nic_tx_threshold_hit);
11580 ESTAT_ADD(mbuf_lwm_thresh_hit);
11583 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11585 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11586 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11588 stats->rx_packets = old_stats->rx_packets +
11589 get_stat64(&hw_stats->rx_ucast_packets) +
11590 get_stat64(&hw_stats->rx_mcast_packets) +
11591 get_stat64(&hw_stats->rx_bcast_packets);
11593 stats->tx_packets = old_stats->tx_packets +
11594 get_stat64(&hw_stats->tx_ucast_packets) +
11595 get_stat64(&hw_stats->tx_mcast_packets) +
11596 get_stat64(&hw_stats->tx_bcast_packets);
11598 stats->rx_bytes = old_stats->rx_bytes +
11599 get_stat64(&hw_stats->rx_octets);
11600 stats->tx_bytes = old_stats->tx_bytes +
11601 get_stat64(&hw_stats->tx_octets);
11603 stats->rx_errors = old_stats->rx_errors +
11604 get_stat64(&hw_stats->rx_errors);
11605 stats->tx_errors = old_stats->tx_errors +
11606 get_stat64(&hw_stats->tx_errors) +
11607 get_stat64(&hw_stats->tx_mac_errors) +
11608 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11609 get_stat64(&hw_stats->tx_discards);
11611 stats->multicast = old_stats->multicast +
11612 get_stat64(&hw_stats->rx_mcast_packets);
11613 stats->collisions = old_stats->collisions +
11614 get_stat64(&hw_stats->tx_collisions);
11616 stats->rx_length_errors = old_stats->rx_length_errors +
11617 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11618 get_stat64(&hw_stats->rx_undersize_packets);
11620 stats->rx_over_errors = old_stats->rx_over_errors +
11621 get_stat64(&hw_stats->rxbds_empty);
11622 stats->rx_frame_errors = old_stats->rx_frame_errors +
11623 get_stat64(&hw_stats->rx_align_errors);
11624 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11625 get_stat64(&hw_stats->tx_discards);
11626 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11627 get_stat64(&hw_stats->tx_carrier_sense_errors);
11629 stats->rx_crc_errors = old_stats->rx_crc_errors +
11630 tg3_calc_crc_errors(tp);
11632 stats->rx_missed_errors = old_stats->rx_missed_errors +
11633 get_stat64(&hw_stats->rx_discards);
11635 stats->rx_dropped = tp->rx_dropped;
11636 stats->tx_dropped = tp->tx_dropped;
11639 static int tg3_get_regs_len(struct net_device *dev)
11641 return TG3_REG_BLK_SIZE;
11644 static void tg3_get_regs(struct net_device *dev,
11645 struct ethtool_regs *regs, void *_p)
11647 struct tg3 *tp = netdev_priv(dev);
11651 memset(_p, 0, TG3_REG_BLK_SIZE);
11653 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11656 tg3_full_lock(tp, 0);
11658 tg3_dump_legacy_regs(tp, (u32 *)_p);
11660 tg3_full_unlock(tp);
11663 static int tg3_get_eeprom_len(struct net_device *dev)
11665 struct tg3 *tp = netdev_priv(dev);
11667 return tp->nvram_size;
11670 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11672 struct tg3 *tp = netdev_priv(dev);
11675 u32 i, offset, len, b_offset, b_count;
11678 if (tg3_flag(tp, NO_NVRAM))
11681 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11684 offset = eeprom->offset;
11688 eeprom->magic = TG3_EEPROM_MAGIC;
11691 /* adjustments to start on required 4 byte boundary */
11692 b_offset = offset & 3;
11693 b_count = 4 - b_offset;
11694 if (b_count > len) {
11695 /* i.e. offset=1 len=2 */
11698 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11701 memcpy(data, ((char *)&val) + b_offset, b_count);
11704 eeprom->len += b_count;
11707 /* read bytes up to the last 4 byte boundary */
11708 pd = &data[eeprom->len];
11709 for (i = 0; i < (len - (len & 3)); i += 4) {
11710 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11715 memcpy(pd + i, &val, 4);
11720 /* read last bytes not ending on 4 byte boundary */
11721 pd = &data[eeprom->len];
11723 b_offset = offset + len - b_count;
11724 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11727 memcpy(pd, &val, b_count);
11728 eeprom->len += b_count;
11733 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11735 struct tg3 *tp = netdev_priv(dev);
11737 u32 offset, len, b_offset, odd_len;
11741 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11744 if (tg3_flag(tp, NO_NVRAM) ||
11745 eeprom->magic != TG3_EEPROM_MAGIC)
11748 offset = eeprom->offset;
11751 if ((b_offset = (offset & 3))) {
11752 /* adjustments to start on required 4 byte boundary */
11753 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11764 /* adjustments to end on required 4 byte boundary */
11766 len = (len + 3) & ~3;
11767 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11773 if (b_offset || odd_len) {
11774 buf = kmalloc(len, GFP_KERNEL);
11778 memcpy(buf, &start, 4);
11780 memcpy(buf+len-4, &end, 4);
11781 memcpy(buf + b_offset, data, eeprom->len);
11784 ret = tg3_nvram_write_block(tp, offset, len, buf);
11792 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11794 struct tg3 *tp = netdev_priv(dev);
11796 if (tg3_flag(tp, USE_PHYLIB)) {
11797 struct phy_device *phydev;
11798 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11800 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11801 return phy_ethtool_gset(phydev, cmd);
11804 cmd->supported = (SUPPORTED_Autoneg);
11806 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11807 cmd->supported |= (SUPPORTED_1000baseT_Half |
11808 SUPPORTED_1000baseT_Full);
11810 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11811 cmd->supported |= (SUPPORTED_100baseT_Half |
11812 SUPPORTED_100baseT_Full |
11813 SUPPORTED_10baseT_Half |
11814 SUPPORTED_10baseT_Full |
11816 cmd->port = PORT_TP;
11818 cmd->supported |= SUPPORTED_FIBRE;
11819 cmd->port = PORT_FIBRE;
11822 cmd->advertising = tp->link_config.advertising;
11823 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11824 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11825 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11826 cmd->advertising |= ADVERTISED_Pause;
11828 cmd->advertising |= ADVERTISED_Pause |
11829 ADVERTISED_Asym_Pause;
11831 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11832 cmd->advertising |= ADVERTISED_Asym_Pause;
11835 if (netif_running(dev) && tp->link_up) {
11836 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11837 cmd->duplex = tp->link_config.active_duplex;
11838 cmd->lp_advertising = tp->link_config.rmt_adv;
11839 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11840 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11841 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11843 cmd->eth_tp_mdix = ETH_TP_MDI;
11846 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11847 cmd->duplex = DUPLEX_UNKNOWN;
11848 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11850 cmd->phy_address = tp->phy_addr;
11851 cmd->transceiver = XCVR_INTERNAL;
11852 cmd->autoneg = tp->link_config.autoneg;
11858 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11860 struct tg3 *tp = netdev_priv(dev);
11861 u32 speed = ethtool_cmd_speed(cmd);
11863 if (tg3_flag(tp, USE_PHYLIB)) {
11864 struct phy_device *phydev;
11865 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11867 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11868 return phy_ethtool_sset(phydev, cmd);
11871 if (cmd->autoneg != AUTONEG_ENABLE &&
11872 cmd->autoneg != AUTONEG_DISABLE)
11875 if (cmd->autoneg == AUTONEG_DISABLE &&
11876 cmd->duplex != DUPLEX_FULL &&
11877 cmd->duplex != DUPLEX_HALF)
11880 if (cmd->autoneg == AUTONEG_ENABLE) {
11881 u32 mask = ADVERTISED_Autoneg |
11883 ADVERTISED_Asym_Pause;
11885 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11886 mask |= ADVERTISED_1000baseT_Half |
11887 ADVERTISED_1000baseT_Full;
11889 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11890 mask |= ADVERTISED_100baseT_Half |
11891 ADVERTISED_100baseT_Full |
11892 ADVERTISED_10baseT_Half |
11893 ADVERTISED_10baseT_Full |
11896 mask |= ADVERTISED_FIBRE;
11898 if (cmd->advertising & ~mask)
11901 mask &= (ADVERTISED_1000baseT_Half |
11902 ADVERTISED_1000baseT_Full |
11903 ADVERTISED_100baseT_Half |
11904 ADVERTISED_100baseT_Full |
11905 ADVERTISED_10baseT_Half |
11906 ADVERTISED_10baseT_Full);
11908 cmd->advertising &= mask;
11910 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11911 if (speed != SPEED_1000)
11914 if (cmd->duplex != DUPLEX_FULL)
11917 if (speed != SPEED_100 &&
11923 tg3_full_lock(tp, 0);
11925 tp->link_config.autoneg = cmd->autoneg;
11926 if (cmd->autoneg == AUTONEG_ENABLE) {
11927 tp->link_config.advertising = (cmd->advertising |
11928 ADVERTISED_Autoneg);
11929 tp->link_config.speed = SPEED_UNKNOWN;
11930 tp->link_config.duplex = DUPLEX_UNKNOWN;
11932 tp->link_config.advertising = 0;
11933 tp->link_config.speed = speed;
11934 tp->link_config.duplex = cmd->duplex;
11937 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
11939 tg3_warn_mgmt_link_flap(tp);
11941 if (netif_running(dev))
11942 tg3_setup_phy(tp, true);
11944 tg3_full_unlock(tp);
11949 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11951 struct tg3 *tp = netdev_priv(dev);
11953 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11954 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11955 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11956 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11959 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11961 struct tg3 *tp = netdev_priv(dev);
11963 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11964 wol->supported = WAKE_MAGIC;
11966 wol->supported = 0;
11968 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11969 wol->wolopts = WAKE_MAGIC;
11970 memset(&wol->sopass, 0, sizeof(wol->sopass));
11973 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11975 struct tg3 *tp = netdev_priv(dev);
11976 struct device *dp = &tp->pdev->dev;
11978 if (wol->wolopts & ~WAKE_MAGIC)
11980 if ((wol->wolopts & WAKE_MAGIC) &&
11981 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11984 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11986 spin_lock_bh(&tp->lock);
11987 if (device_may_wakeup(dp))
11988 tg3_flag_set(tp, WOL_ENABLE);
11990 tg3_flag_clear(tp, WOL_ENABLE);
11991 spin_unlock_bh(&tp->lock);
11996 static u32 tg3_get_msglevel(struct net_device *dev)
11998 struct tg3 *tp = netdev_priv(dev);
11999 return tp->msg_enable;
12002 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12004 struct tg3 *tp = netdev_priv(dev);
12005 tp->msg_enable = value;
12008 static int tg3_nway_reset(struct net_device *dev)
12010 struct tg3 *tp = netdev_priv(dev);
12013 if (!netif_running(dev))
12016 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12019 tg3_warn_mgmt_link_flap(tp);
12021 if (tg3_flag(tp, USE_PHYLIB)) {
12022 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12024 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
12028 spin_lock_bh(&tp->lock);
12030 tg3_readphy(tp, MII_BMCR, &bmcr);
12031 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12032 ((bmcr & BMCR_ANENABLE) ||
12033 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12034 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12038 spin_unlock_bh(&tp->lock);
12044 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12046 struct tg3 *tp = netdev_priv(dev);
12048 ering->rx_max_pending = tp->rx_std_ring_mask;
12049 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12050 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12052 ering->rx_jumbo_max_pending = 0;
12054 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12056 ering->rx_pending = tp->rx_pending;
12057 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12058 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12060 ering->rx_jumbo_pending = 0;
12062 ering->tx_pending = tp->napi[0].tx_pending;
12065 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12067 struct tg3 *tp = netdev_priv(dev);
12068 int i, irq_sync = 0, err = 0;
12070 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12071 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12072 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12073 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12074 (tg3_flag(tp, TSO_BUG) &&
12075 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12078 if (netif_running(dev)) {
12080 tg3_netif_stop(tp);
12084 tg3_full_lock(tp, irq_sync);
12086 tp->rx_pending = ering->rx_pending;
12088 if (tg3_flag(tp, MAX_RXPEND_64) &&
12089 tp->rx_pending > 63)
12090 tp->rx_pending = 63;
12091 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12093 for (i = 0; i < tp->irq_max; i++)
12094 tp->napi[i].tx_pending = ering->tx_pending;
12096 if (netif_running(dev)) {
12097 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12098 err = tg3_restart_hw(tp, false);
12100 tg3_netif_start(tp);
12103 tg3_full_unlock(tp);
12105 if (irq_sync && !err)
12111 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12113 struct tg3 *tp = netdev_priv(dev);
12115 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12117 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12118 epause->rx_pause = 1;
12120 epause->rx_pause = 0;
12122 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12123 epause->tx_pause = 1;
12125 epause->tx_pause = 0;
12128 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12130 struct tg3 *tp = netdev_priv(dev);
12133 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12134 tg3_warn_mgmt_link_flap(tp);
12136 if (tg3_flag(tp, USE_PHYLIB)) {
12138 struct phy_device *phydev;
12140 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12142 if (!(phydev->supported & SUPPORTED_Pause) ||
12143 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12144 (epause->rx_pause != epause->tx_pause)))
12147 tp->link_config.flowctrl = 0;
12148 if (epause->rx_pause) {
12149 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12151 if (epause->tx_pause) {
12152 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12153 newadv = ADVERTISED_Pause;
12155 newadv = ADVERTISED_Pause |
12156 ADVERTISED_Asym_Pause;
12157 } else if (epause->tx_pause) {
12158 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12159 newadv = ADVERTISED_Asym_Pause;
12163 if (epause->autoneg)
12164 tg3_flag_set(tp, PAUSE_AUTONEG);
12166 tg3_flag_clear(tp, PAUSE_AUTONEG);
12168 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12169 u32 oldadv = phydev->advertising &
12170 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12171 if (oldadv != newadv) {
12172 phydev->advertising &=
12173 ~(ADVERTISED_Pause |
12174 ADVERTISED_Asym_Pause);
12175 phydev->advertising |= newadv;
12176 if (phydev->autoneg) {
12178 * Always renegotiate the link to
12179 * inform our link partner of our
12180 * flow control settings, even if the
12181 * flow control is forced. Let
12182 * tg3_adjust_link() do the final
12183 * flow control setup.
12185 return phy_start_aneg(phydev);
12189 if (!epause->autoneg)
12190 tg3_setup_flow_control(tp, 0, 0);
12192 tp->link_config.advertising &=
12193 ~(ADVERTISED_Pause |
12194 ADVERTISED_Asym_Pause);
12195 tp->link_config.advertising |= newadv;
12200 if (netif_running(dev)) {
12201 tg3_netif_stop(tp);
12205 tg3_full_lock(tp, irq_sync);
12207 if (epause->autoneg)
12208 tg3_flag_set(tp, PAUSE_AUTONEG);
12210 tg3_flag_clear(tp, PAUSE_AUTONEG);
12211 if (epause->rx_pause)
12212 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12214 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12215 if (epause->tx_pause)
12216 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12218 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12220 if (netif_running(dev)) {
12221 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12222 err = tg3_restart_hw(tp, false);
12224 tg3_netif_start(tp);
12227 tg3_full_unlock(tp);
12230 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12235 static int tg3_get_sset_count(struct net_device *dev, int sset)
12239 return TG3_NUM_TEST;
12241 return TG3_NUM_STATS;
12243 return -EOPNOTSUPP;
12247 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12248 u32 *rules __always_unused)
12250 struct tg3 *tp = netdev_priv(dev);
12252 if (!tg3_flag(tp, SUPPORT_MSIX))
12253 return -EOPNOTSUPP;
12255 switch (info->cmd) {
12256 case ETHTOOL_GRXRINGS:
12257 if (netif_running(tp->dev))
12258 info->data = tp->rxq_cnt;
12260 info->data = num_online_cpus();
12261 if (info->data > TG3_RSS_MAX_NUM_QS)
12262 info->data = TG3_RSS_MAX_NUM_QS;
12265 /* The first interrupt vector only
12266 * handles link interrupts.
12272 return -EOPNOTSUPP;
12276 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12279 struct tg3 *tp = netdev_priv(dev);
12281 if (tg3_flag(tp, SUPPORT_MSIX))
12282 size = TG3_RSS_INDIR_TBL_SIZE;
12287 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12289 struct tg3 *tp = netdev_priv(dev);
12292 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12293 indir[i] = tp->rss_ind_tbl[i];
12298 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12300 struct tg3 *tp = netdev_priv(dev);
12303 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12304 tp->rss_ind_tbl[i] = indir[i];
12306 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12309 /* It is legal to write the indirection
12310 * table while the device is running.
12312 tg3_full_lock(tp, 0);
12313 tg3_rss_write_indir_tbl(tp);
12314 tg3_full_unlock(tp);
12319 static void tg3_get_channels(struct net_device *dev,
12320 struct ethtool_channels *channel)
12322 struct tg3 *tp = netdev_priv(dev);
12323 u32 deflt_qs = netif_get_num_default_rss_queues();
12325 channel->max_rx = tp->rxq_max;
12326 channel->max_tx = tp->txq_max;
12328 if (netif_running(dev)) {
12329 channel->rx_count = tp->rxq_cnt;
12330 channel->tx_count = tp->txq_cnt;
12333 channel->rx_count = tp->rxq_req;
12335 channel->rx_count = min(deflt_qs, tp->rxq_max);
12338 channel->tx_count = tp->txq_req;
12340 channel->tx_count = min(deflt_qs, tp->txq_max);
12344 static int tg3_set_channels(struct net_device *dev,
12345 struct ethtool_channels *channel)
12347 struct tg3 *tp = netdev_priv(dev);
12349 if (!tg3_flag(tp, SUPPORT_MSIX))
12350 return -EOPNOTSUPP;
12352 if (channel->rx_count > tp->rxq_max ||
12353 channel->tx_count > tp->txq_max)
12356 tp->rxq_req = channel->rx_count;
12357 tp->txq_req = channel->tx_count;
12359 if (!netif_running(dev))
12364 tg3_carrier_off(tp);
12366 tg3_start(tp, true, false, false);
12371 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12373 switch (stringset) {
12375 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
12378 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
12381 WARN_ON(1); /* we need a WARN() */
12386 static int tg3_set_phys_id(struct net_device *dev,
12387 enum ethtool_phys_id_state state)
12389 struct tg3 *tp = netdev_priv(dev);
12391 if (!netif_running(tp->dev))
12395 case ETHTOOL_ID_ACTIVE:
12396 return 1; /* cycle on/off once per second */
12398 case ETHTOOL_ID_ON:
12399 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12400 LED_CTRL_1000MBPS_ON |
12401 LED_CTRL_100MBPS_ON |
12402 LED_CTRL_10MBPS_ON |
12403 LED_CTRL_TRAFFIC_OVERRIDE |
12404 LED_CTRL_TRAFFIC_BLINK |
12405 LED_CTRL_TRAFFIC_LED);
12408 case ETHTOOL_ID_OFF:
12409 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12410 LED_CTRL_TRAFFIC_OVERRIDE);
12413 case ETHTOOL_ID_INACTIVE:
12414 tw32(MAC_LED_CTRL, tp->led_ctrl);
12421 static void tg3_get_ethtool_stats(struct net_device *dev,
12422 struct ethtool_stats *estats, u64 *tmp_stats)
12424 struct tg3 *tp = netdev_priv(dev);
12427 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12429 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12432 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12436 u32 offset = 0, len = 0;
12439 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12442 if (magic == TG3_EEPROM_MAGIC) {
12443 for (offset = TG3_NVM_DIR_START;
12444 offset < TG3_NVM_DIR_END;
12445 offset += TG3_NVM_DIRENT_SIZE) {
12446 if (tg3_nvram_read(tp, offset, &val))
12449 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12450 TG3_NVM_DIRTYPE_EXTVPD)
12454 if (offset != TG3_NVM_DIR_END) {
12455 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12456 if (tg3_nvram_read(tp, offset + 4, &offset))
12459 offset = tg3_nvram_logical_addr(tp, offset);
12463 if (!offset || !len) {
12464 offset = TG3_NVM_VPD_OFF;
12465 len = TG3_NVM_VPD_LEN;
12468 buf = kmalloc(len, GFP_KERNEL);
12472 if (magic == TG3_EEPROM_MAGIC) {
12473 for (i = 0; i < len; i += 4) {
12474 /* The data is in little-endian format in NVRAM.
12475 * Use the big-endian read routines to preserve
12476 * the byte order as it exists in NVRAM.
12478 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12484 unsigned int pos = 0;
12486 ptr = (u8 *)&buf[0];
12487 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12488 cnt = pci_read_vpd(tp->pdev, pos,
12490 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12508 #define NVRAM_TEST_SIZE 0x100
12509 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12510 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12511 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12512 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12513 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12514 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12515 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12516 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12518 static int tg3_test_nvram(struct tg3 *tp)
12520 u32 csum, magic, len;
12522 int i, j, k, err = 0, size;
12524 if (tg3_flag(tp, NO_NVRAM))
12527 if (tg3_nvram_read(tp, 0, &magic) != 0)
12530 if (magic == TG3_EEPROM_MAGIC)
12531 size = NVRAM_TEST_SIZE;
12532 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12533 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12534 TG3_EEPROM_SB_FORMAT_1) {
12535 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12536 case TG3_EEPROM_SB_REVISION_0:
12537 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12539 case TG3_EEPROM_SB_REVISION_2:
12540 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12542 case TG3_EEPROM_SB_REVISION_3:
12543 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12545 case TG3_EEPROM_SB_REVISION_4:
12546 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12548 case TG3_EEPROM_SB_REVISION_5:
12549 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12551 case TG3_EEPROM_SB_REVISION_6:
12552 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12559 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12560 size = NVRAM_SELFBOOT_HW_SIZE;
12564 buf = kmalloc(size, GFP_KERNEL);
12569 for (i = 0, j = 0; i < size; i += 4, j++) {
12570 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12577 /* Selfboot format */
12578 magic = be32_to_cpu(buf[0]);
12579 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12580 TG3_EEPROM_MAGIC_FW) {
12581 u8 *buf8 = (u8 *) buf, csum8 = 0;
12583 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12584 TG3_EEPROM_SB_REVISION_2) {
12585 /* For rev 2, the csum doesn't include the MBA. */
12586 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12588 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12591 for (i = 0; i < size; i++)
12604 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12605 TG3_EEPROM_MAGIC_HW) {
12606 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12607 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12608 u8 *buf8 = (u8 *) buf;
12610 /* Separate the parity bits and the data bytes. */
12611 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12612 if ((i == 0) || (i == 8)) {
12616 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12617 parity[k++] = buf8[i] & msk;
12619 } else if (i == 16) {
12623 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12624 parity[k++] = buf8[i] & msk;
12627 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12628 parity[k++] = buf8[i] & msk;
12631 data[j++] = buf8[i];
12635 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12636 u8 hw8 = hweight8(data[i]);
12638 if ((hw8 & 0x1) && parity[i])
12640 else if (!(hw8 & 0x1) && !parity[i])
12649 /* Bootstrap checksum at offset 0x10 */
12650 csum = calc_crc((unsigned char *) buf, 0x10);
12651 if (csum != le32_to_cpu(buf[0x10/4]))
12654 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12655 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12656 if (csum != le32_to_cpu(buf[0xfc/4]))
12661 buf = tg3_vpd_readblock(tp, &len);
12665 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12667 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12671 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12674 i += PCI_VPD_LRDT_TAG_SIZE;
12675 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12676 PCI_VPD_RO_KEYWORD_CHKSUM);
12680 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12682 for (i = 0; i <= j; i++)
12683 csum8 += ((u8 *)buf)[i];
12697 #define TG3_SERDES_TIMEOUT_SEC 2
12698 #define TG3_COPPER_TIMEOUT_SEC 6
12700 static int tg3_test_link(struct tg3 *tp)
12704 if (!netif_running(tp->dev))
12707 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12708 max = TG3_SERDES_TIMEOUT_SEC;
12710 max = TG3_COPPER_TIMEOUT_SEC;
12712 for (i = 0; i < max; i++) {
12716 if (msleep_interruptible(1000))
12723 /* Only test the commonly used registers */
12724 static int tg3_test_registers(struct tg3 *tp)
12726 int i, is_5705, is_5750;
12727 u32 offset, read_mask, write_mask, val, save_val, read_val;
12731 #define TG3_FL_5705 0x1
12732 #define TG3_FL_NOT_5705 0x2
12733 #define TG3_FL_NOT_5788 0x4
12734 #define TG3_FL_NOT_5750 0x8
12738 /* MAC Control Registers */
12739 { MAC_MODE, TG3_FL_NOT_5705,
12740 0x00000000, 0x00ef6f8c },
12741 { MAC_MODE, TG3_FL_5705,
12742 0x00000000, 0x01ef6b8c },
12743 { MAC_STATUS, TG3_FL_NOT_5705,
12744 0x03800107, 0x00000000 },
12745 { MAC_STATUS, TG3_FL_5705,
12746 0x03800100, 0x00000000 },
12747 { MAC_ADDR_0_HIGH, 0x0000,
12748 0x00000000, 0x0000ffff },
12749 { MAC_ADDR_0_LOW, 0x0000,
12750 0x00000000, 0xffffffff },
12751 { MAC_RX_MTU_SIZE, 0x0000,
12752 0x00000000, 0x0000ffff },
12753 { MAC_TX_MODE, 0x0000,
12754 0x00000000, 0x00000070 },
12755 { MAC_TX_LENGTHS, 0x0000,
12756 0x00000000, 0x00003fff },
12757 { MAC_RX_MODE, TG3_FL_NOT_5705,
12758 0x00000000, 0x000007fc },
12759 { MAC_RX_MODE, TG3_FL_5705,
12760 0x00000000, 0x000007dc },
12761 { MAC_HASH_REG_0, 0x0000,
12762 0x00000000, 0xffffffff },
12763 { MAC_HASH_REG_1, 0x0000,
12764 0x00000000, 0xffffffff },
12765 { MAC_HASH_REG_2, 0x0000,
12766 0x00000000, 0xffffffff },
12767 { MAC_HASH_REG_3, 0x0000,
12768 0x00000000, 0xffffffff },
12770 /* Receive Data and Receive BD Initiator Control Registers. */
12771 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12772 0x00000000, 0xffffffff },
12773 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12774 0x00000000, 0xffffffff },
12775 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12776 0x00000000, 0x00000003 },
12777 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12778 0x00000000, 0xffffffff },
12779 { RCVDBDI_STD_BD+0, 0x0000,
12780 0x00000000, 0xffffffff },
12781 { RCVDBDI_STD_BD+4, 0x0000,
12782 0x00000000, 0xffffffff },
12783 { RCVDBDI_STD_BD+8, 0x0000,
12784 0x00000000, 0xffff0002 },
12785 { RCVDBDI_STD_BD+0xc, 0x0000,
12786 0x00000000, 0xffffffff },
12788 /* Receive BD Initiator Control Registers. */
12789 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12790 0x00000000, 0xffffffff },
12791 { RCVBDI_STD_THRESH, TG3_FL_5705,
12792 0x00000000, 0x000003ff },
12793 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12794 0x00000000, 0xffffffff },
12796 /* Host Coalescing Control Registers. */
12797 { HOSTCC_MODE, TG3_FL_NOT_5705,
12798 0x00000000, 0x00000004 },
12799 { HOSTCC_MODE, TG3_FL_5705,
12800 0x00000000, 0x000000f6 },
12801 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12802 0x00000000, 0xffffffff },
12803 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12804 0x00000000, 0x000003ff },
12805 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12806 0x00000000, 0xffffffff },
12807 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12808 0x00000000, 0x000003ff },
12809 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12810 0x00000000, 0xffffffff },
12811 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12812 0x00000000, 0x000000ff },
12813 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12814 0x00000000, 0xffffffff },
12815 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12816 0x00000000, 0x000000ff },
12817 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12818 0x00000000, 0xffffffff },
12819 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12820 0x00000000, 0xffffffff },
12821 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12822 0x00000000, 0xffffffff },
12823 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12824 0x00000000, 0x000000ff },
12825 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12826 0x00000000, 0xffffffff },
12827 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12828 0x00000000, 0x000000ff },
12829 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12830 0x00000000, 0xffffffff },
12831 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12832 0x00000000, 0xffffffff },
12833 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12834 0x00000000, 0xffffffff },
12835 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12836 0x00000000, 0xffffffff },
12837 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12838 0x00000000, 0xffffffff },
12839 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12840 0xffffffff, 0x00000000 },
12841 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12842 0xffffffff, 0x00000000 },
12844 /* Buffer Manager Control Registers. */
12845 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12846 0x00000000, 0x007fff80 },
12847 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12848 0x00000000, 0x007fffff },
12849 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12850 0x00000000, 0x0000003f },
12851 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12852 0x00000000, 0x000001ff },
12853 { BUFMGR_MB_HIGH_WATER, 0x0000,
12854 0x00000000, 0x000001ff },
12855 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12856 0xffffffff, 0x00000000 },
12857 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12858 0xffffffff, 0x00000000 },
12860 /* Mailbox Registers */
12861 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12862 0x00000000, 0x000001ff },
12863 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12864 0x00000000, 0x000001ff },
12865 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12866 0x00000000, 0x000007ff },
12867 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12868 0x00000000, 0x000001ff },
12870 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12873 is_5705 = is_5750 = 0;
12874 if (tg3_flag(tp, 5705_PLUS)) {
12876 if (tg3_flag(tp, 5750_PLUS))
12880 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12881 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12884 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12887 if (tg3_flag(tp, IS_5788) &&
12888 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12891 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12894 offset = (u32) reg_tbl[i].offset;
12895 read_mask = reg_tbl[i].read_mask;
12896 write_mask = reg_tbl[i].write_mask;
12898 /* Save the original register content */
12899 save_val = tr32(offset);
12901 /* Determine the read-only value. */
12902 read_val = save_val & read_mask;
12904 /* Write zero to the register, then make sure the read-only bits
12905 * are not changed and the read/write bits are all zeros.
12909 val = tr32(offset);
12911 /* Test the read-only and read/write bits. */
12912 if (((val & read_mask) != read_val) || (val & write_mask))
12915 /* Write ones to all the bits defined by RdMask and WrMask, then
12916 * make sure the read-only bits are not changed and the
12917 * read/write bits are all ones.
12919 tw32(offset, read_mask | write_mask);
12921 val = tr32(offset);
12923 /* Test the read-only bits. */
12924 if ((val & read_mask) != read_val)
12927 /* Test the read/write bits. */
12928 if ((val & write_mask) != write_mask)
12931 tw32(offset, save_val);
12937 if (netif_msg_hw(tp))
12938 netdev_err(tp->dev,
12939 "Register test failed at offset %x\n", offset);
12940 tw32(offset, save_val);
12944 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12946 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12950 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12951 for (j = 0; j < len; j += 4) {
12954 tg3_write_mem(tp, offset + j, test_pattern[i]);
12955 tg3_read_mem(tp, offset + j, &val);
12956 if (val != test_pattern[i])
12963 static int tg3_test_memory(struct tg3 *tp)
12965 static struct mem_entry {
12968 } mem_tbl_570x[] = {
12969 { 0x00000000, 0x00b50},
12970 { 0x00002000, 0x1c000},
12971 { 0xffffffff, 0x00000}
12972 }, mem_tbl_5705[] = {
12973 { 0x00000100, 0x0000c},
12974 { 0x00000200, 0x00008},
12975 { 0x00004000, 0x00800},
12976 { 0x00006000, 0x01000},
12977 { 0x00008000, 0x02000},
12978 { 0x00010000, 0x0e000},
12979 { 0xffffffff, 0x00000}
12980 }, mem_tbl_5755[] = {
12981 { 0x00000200, 0x00008},
12982 { 0x00004000, 0x00800},
12983 { 0x00006000, 0x00800},
12984 { 0x00008000, 0x02000},
12985 { 0x00010000, 0x0c000},
12986 { 0xffffffff, 0x00000}
12987 }, mem_tbl_5906[] = {
12988 { 0x00000200, 0x00008},
12989 { 0x00004000, 0x00400},
12990 { 0x00006000, 0x00400},
12991 { 0x00008000, 0x01000},
12992 { 0x00010000, 0x01000},
12993 { 0xffffffff, 0x00000}
12994 }, mem_tbl_5717[] = {
12995 { 0x00000200, 0x00008},
12996 { 0x00010000, 0x0a000},
12997 { 0x00020000, 0x13c00},
12998 { 0xffffffff, 0x00000}
12999 }, mem_tbl_57765[] = {
13000 { 0x00000200, 0x00008},
13001 { 0x00004000, 0x00800},
13002 { 0x00006000, 0x09800},
13003 { 0x00010000, 0x0a000},
13004 { 0xffffffff, 0x00000}
13006 struct mem_entry *mem_tbl;
13010 if (tg3_flag(tp, 5717_PLUS))
13011 mem_tbl = mem_tbl_5717;
13012 else if (tg3_flag(tp, 57765_CLASS) ||
13013 tg3_asic_rev(tp) == ASIC_REV_5762)
13014 mem_tbl = mem_tbl_57765;
13015 else if (tg3_flag(tp, 5755_PLUS))
13016 mem_tbl = mem_tbl_5755;
13017 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13018 mem_tbl = mem_tbl_5906;
13019 else if (tg3_flag(tp, 5705_PLUS))
13020 mem_tbl = mem_tbl_5705;
13022 mem_tbl = mem_tbl_570x;
13024 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13025 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13033 #define TG3_TSO_MSS 500
13035 #define TG3_TSO_IP_HDR_LEN 20
13036 #define TG3_TSO_TCP_HDR_LEN 20
13037 #define TG3_TSO_TCP_OPT_LEN 12
13039 static const u8 tg3_tso_header[] = {
13041 0x45, 0x00, 0x00, 0x00,
13042 0x00, 0x00, 0x40, 0x00,
13043 0x40, 0x06, 0x00, 0x00,
13044 0x0a, 0x00, 0x00, 0x01,
13045 0x0a, 0x00, 0x00, 0x02,
13046 0x0d, 0x00, 0xe0, 0x00,
13047 0x00, 0x00, 0x01, 0x00,
13048 0x00, 0x00, 0x02, 0x00,
13049 0x80, 0x10, 0x10, 0x00,
13050 0x14, 0x09, 0x00, 0x00,
13051 0x01, 0x01, 0x08, 0x0a,
13052 0x11, 0x11, 0x11, 0x11,
13053 0x11, 0x11, 0x11, 0x11,
13056 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13058 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13059 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13061 struct sk_buff *skb;
13062 u8 *tx_data, *rx_data;
13064 int num_pkts, tx_len, rx_len, i, err;
13065 struct tg3_rx_buffer_desc *desc;
13066 struct tg3_napi *tnapi, *rnapi;
13067 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13069 tnapi = &tp->napi[0];
13070 rnapi = &tp->napi[0];
13071 if (tp->irq_cnt > 1) {
13072 if (tg3_flag(tp, ENABLE_RSS))
13073 rnapi = &tp->napi[1];
13074 if (tg3_flag(tp, ENABLE_TSS))
13075 tnapi = &tp->napi[1];
13077 coal_now = tnapi->coal_now | rnapi->coal_now;
13082 skb = netdev_alloc_skb(tp->dev, tx_len);
13086 tx_data = skb_put(skb, tx_len);
13087 memcpy(tx_data, tp->dev->dev_addr, 6);
13088 memset(tx_data + 6, 0x0, 8);
13090 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13092 if (tso_loopback) {
13093 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13095 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13096 TG3_TSO_TCP_OPT_LEN;
13098 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13099 sizeof(tg3_tso_header));
13102 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13103 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13105 /* Set the total length field in the IP header */
13106 iph->tot_len = htons((u16)(mss + hdr_len));
13108 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13109 TXD_FLAG_CPU_POST_DMA);
13111 if (tg3_flag(tp, HW_TSO_1) ||
13112 tg3_flag(tp, HW_TSO_2) ||
13113 tg3_flag(tp, HW_TSO_3)) {
13115 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13116 th = (struct tcphdr *)&tx_data[val];
13119 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13121 if (tg3_flag(tp, HW_TSO_3)) {
13122 mss |= (hdr_len & 0xc) << 12;
13123 if (hdr_len & 0x10)
13124 base_flags |= 0x00000010;
13125 base_flags |= (hdr_len & 0x3e0) << 5;
13126 } else if (tg3_flag(tp, HW_TSO_2))
13127 mss |= hdr_len << 9;
13128 else if (tg3_flag(tp, HW_TSO_1) ||
13129 tg3_asic_rev(tp) == ASIC_REV_5705) {
13130 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13132 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13135 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13138 data_off = ETH_HLEN;
13140 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13141 tx_len > VLAN_ETH_FRAME_LEN)
13142 base_flags |= TXD_FLAG_JMB_PKT;
13145 for (i = data_off; i < tx_len; i++)
13146 tx_data[i] = (u8) (i & 0xff);
13148 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13149 if (pci_dma_mapping_error(tp->pdev, map)) {
13150 dev_kfree_skb(skb);
13154 val = tnapi->tx_prod;
13155 tnapi->tx_buffers[val].skb = skb;
13156 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13158 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13163 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13165 budget = tg3_tx_avail(tnapi);
13166 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13167 base_flags | TXD_FLAG_END, mss, 0)) {
13168 tnapi->tx_buffers[val].skb = NULL;
13169 dev_kfree_skb(skb);
13175 /* Sync BD data before updating mailbox */
13178 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13179 tr32_mailbox(tnapi->prodmbox);
13183 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13184 for (i = 0; i < 35; i++) {
13185 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13190 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13191 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13192 if ((tx_idx == tnapi->tx_prod) &&
13193 (rx_idx == (rx_start_idx + num_pkts)))
13197 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13198 dev_kfree_skb(skb);
13200 if (tx_idx != tnapi->tx_prod)
13203 if (rx_idx != rx_start_idx + num_pkts)
13207 while (rx_idx != rx_start_idx) {
13208 desc = &rnapi->rx_rcb[rx_start_idx++];
13209 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13210 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13212 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13213 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13216 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13219 if (!tso_loopback) {
13220 if (rx_len != tx_len)
13223 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13224 if (opaque_key != RXD_OPAQUE_RING_STD)
13227 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13230 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13231 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13232 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13236 if (opaque_key == RXD_OPAQUE_RING_STD) {
13237 rx_data = tpr->rx_std_buffers[desc_idx].data;
13238 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13240 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13241 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13242 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13247 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13248 PCI_DMA_FROMDEVICE);
13250 rx_data += TG3_RX_OFFSET(tp);
13251 for (i = data_off; i < rx_len; i++, val++) {
13252 if (*(rx_data + i) != (u8) (val & 0xff))
13259 /* tg3_free_rings will unmap and free the rx_data */
13264 #define TG3_STD_LOOPBACK_FAILED 1
13265 #define TG3_JMB_LOOPBACK_FAILED 2
13266 #define TG3_TSO_LOOPBACK_FAILED 4
13267 #define TG3_LOOPBACK_FAILED \
13268 (TG3_STD_LOOPBACK_FAILED | \
13269 TG3_JMB_LOOPBACK_FAILED | \
13270 TG3_TSO_LOOPBACK_FAILED)
13272 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13276 u32 jmb_pkt_sz = 9000;
13279 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13281 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13282 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13284 if (!netif_running(tp->dev)) {
13285 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13286 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13288 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13292 err = tg3_reset_hw(tp, true);
13294 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13295 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13297 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13301 if (tg3_flag(tp, ENABLE_RSS)) {
13304 /* Reroute all rx packets to the 1st queue */
13305 for (i = MAC_RSS_INDIR_TBL_0;
13306 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13310 /* HW errata - mac loopback fails in some cases on 5780.
13311 * Normal traffic and PHY loopback are not affected by
13312 * errata. Also, the MAC loopback test is deprecated for
13313 * all newer ASIC revisions.
13315 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13316 !tg3_flag(tp, CPMU_PRESENT)) {
13317 tg3_mac_loopback(tp, true);
13319 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13320 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13322 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13323 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13324 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13326 tg3_mac_loopback(tp, false);
13329 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13330 !tg3_flag(tp, USE_PHYLIB)) {
13333 tg3_phy_lpbk_set(tp, 0, false);
13335 /* Wait for link */
13336 for (i = 0; i < 100; i++) {
13337 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13342 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13343 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13344 if (tg3_flag(tp, TSO_CAPABLE) &&
13345 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13346 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13347 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13348 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13349 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13352 tg3_phy_lpbk_set(tp, 0, true);
13354 /* All link indications report up, but the hardware
13355 * isn't really ready for about 20 msec. Double it
13360 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13361 data[TG3_EXT_LOOPB_TEST] |=
13362 TG3_STD_LOOPBACK_FAILED;
13363 if (tg3_flag(tp, TSO_CAPABLE) &&
13364 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13365 data[TG3_EXT_LOOPB_TEST] |=
13366 TG3_TSO_LOOPBACK_FAILED;
13367 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13368 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13369 data[TG3_EXT_LOOPB_TEST] |=
13370 TG3_JMB_LOOPBACK_FAILED;
13373 /* Re-enable gphy autopowerdown. */
13374 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13375 tg3_phy_toggle_apd(tp, true);
13378 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13379 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13382 tp->phy_flags |= eee_cap;
13387 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13390 struct tg3 *tp = netdev_priv(dev);
13391 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13393 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13394 if (tg3_power_up(tp)) {
13395 etest->flags |= ETH_TEST_FL_FAILED;
13396 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13399 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13402 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13404 if (tg3_test_nvram(tp) != 0) {
13405 etest->flags |= ETH_TEST_FL_FAILED;
13406 data[TG3_NVRAM_TEST] = 1;
13408 if (!doextlpbk && tg3_test_link(tp)) {
13409 etest->flags |= ETH_TEST_FL_FAILED;
13410 data[TG3_LINK_TEST] = 1;
13412 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13413 int err, err2 = 0, irq_sync = 0;
13415 if (netif_running(dev)) {
13417 tg3_netif_stop(tp);
13421 tg3_full_lock(tp, irq_sync);
13422 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13423 err = tg3_nvram_lock(tp);
13424 tg3_halt_cpu(tp, RX_CPU_BASE);
13425 if (!tg3_flag(tp, 5705_PLUS))
13426 tg3_halt_cpu(tp, TX_CPU_BASE);
13428 tg3_nvram_unlock(tp);
13430 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13433 if (tg3_test_registers(tp) != 0) {
13434 etest->flags |= ETH_TEST_FL_FAILED;
13435 data[TG3_REGISTER_TEST] = 1;
13438 if (tg3_test_memory(tp) != 0) {
13439 etest->flags |= ETH_TEST_FL_FAILED;
13440 data[TG3_MEMORY_TEST] = 1;
13444 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13446 if (tg3_test_loopback(tp, data, doextlpbk))
13447 etest->flags |= ETH_TEST_FL_FAILED;
13449 tg3_full_unlock(tp);
13451 if (tg3_test_interrupt(tp) != 0) {
13452 etest->flags |= ETH_TEST_FL_FAILED;
13453 data[TG3_INTERRUPT_TEST] = 1;
13456 tg3_full_lock(tp, 0);
13458 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13459 if (netif_running(dev)) {
13460 tg3_flag_set(tp, INIT_COMPLETE);
13461 err2 = tg3_restart_hw(tp, true);
13463 tg3_netif_start(tp);
13466 tg3_full_unlock(tp);
13468 if (irq_sync && !err2)
13471 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13472 tg3_power_down(tp);
13476 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13477 struct ifreq *ifr, int cmd)
13479 struct tg3 *tp = netdev_priv(dev);
13480 struct hwtstamp_config stmpconf;
13482 if (!tg3_flag(tp, PTP_CAPABLE))
13485 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13488 if (stmpconf.flags)
13491 switch (stmpconf.tx_type) {
13492 case HWTSTAMP_TX_ON:
13493 tg3_flag_set(tp, TX_TSTAMP_EN);
13495 case HWTSTAMP_TX_OFF:
13496 tg3_flag_clear(tp, TX_TSTAMP_EN);
13502 switch (stmpconf.rx_filter) {
13503 case HWTSTAMP_FILTER_NONE:
13506 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13507 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13508 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13510 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13511 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13512 TG3_RX_PTP_CTL_SYNC_EVNT;
13514 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13515 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13516 TG3_RX_PTP_CTL_DELAY_REQ;
13518 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13519 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13520 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13522 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13523 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13524 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13526 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13527 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13528 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13530 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13531 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13532 TG3_RX_PTP_CTL_SYNC_EVNT;
13534 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13535 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13536 TG3_RX_PTP_CTL_SYNC_EVNT;
13538 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13539 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13540 TG3_RX_PTP_CTL_SYNC_EVNT;
13542 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13543 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13544 TG3_RX_PTP_CTL_DELAY_REQ;
13546 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13547 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13548 TG3_RX_PTP_CTL_DELAY_REQ;
13550 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13551 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13552 TG3_RX_PTP_CTL_DELAY_REQ;
13558 if (netif_running(dev) && tp->rxptpctl)
13559 tw32(TG3_RX_PTP_CTL,
13560 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13562 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13566 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13568 struct mii_ioctl_data *data = if_mii(ifr);
13569 struct tg3 *tp = netdev_priv(dev);
13572 if (tg3_flag(tp, USE_PHYLIB)) {
13573 struct phy_device *phydev;
13574 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13576 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13577 return phy_mii_ioctl(phydev, ifr, cmd);
13582 data->phy_id = tp->phy_addr;
13585 case SIOCGMIIREG: {
13588 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13589 break; /* We have no PHY */
13591 if (!netif_running(dev))
13594 spin_lock_bh(&tp->lock);
13595 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13596 data->reg_num & 0x1f, &mii_regval);
13597 spin_unlock_bh(&tp->lock);
13599 data->val_out = mii_regval;
13605 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13606 break; /* We have no PHY */
13608 if (!netif_running(dev))
13611 spin_lock_bh(&tp->lock);
13612 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13613 data->reg_num & 0x1f, data->val_in);
13614 spin_unlock_bh(&tp->lock);
13618 case SIOCSHWTSTAMP:
13619 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13625 return -EOPNOTSUPP;
13628 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13630 struct tg3 *tp = netdev_priv(dev);
13632 memcpy(ec, &tp->coal, sizeof(*ec));
13636 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13638 struct tg3 *tp = netdev_priv(dev);
13639 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13640 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13642 if (!tg3_flag(tp, 5705_PLUS)) {
13643 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13644 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13645 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13646 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13649 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13650 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13651 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13652 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13653 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13654 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13655 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13656 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13657 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13658 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13661 /* No rx interrupts will be generated if both are zero */
13662 if ((ec->rx_coalesce_usecs == 0) &&
13663 (ec->rx_max_coalesced_frames == 0))
13666 /* No tx interrupts will be generated if both are zero */
13667 if ((ec->tx_coalesce_usecs == 0) &&
13668 (ec->tx_max_coalesced_frames == 0))
13671 /* Only copy relevant parameters, ignore all others. */
13672 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13673 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13674 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13675 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13676 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13677 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13678 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13679 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13680 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13682 if (netif_running(dev)) {
13683 tg3_full_lock(tp, 0);
13684 __tg3_set_coalesce(tp, &tp->coal);
13685 tg3_full_unlock(tp);
13690 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13692 struct tg3 *tp = netdev_priv(dev);
13694 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13695 netdev_warn(tp->dev, "Board does not support EEE!\n");
13696 return -EOPNOTSUPP;
13699 if (edata->advertised != tp->eee.advertised) {
13700 netdev_warn(tp->dev,
13701 "Direct manipulation of EEE advertisement is not supported\n");
13705 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13706 netdev_warn(tp->dev,
13707 "Maximal Tx Lpi timer supported is %#x(u)\n",
13708 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13714 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13715 tg3_warn_mgmt_link_flap(tp);
13717 if (netif_running(tp->dev)) {
13718 tg3_full_lock(tp, 0);
13721 tg3_full_unlock(tp);
13727 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13729 struct tg3 *tp = netdev_priv(dev);
13731 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13732 netdev_warn(tp->dev,
13733 "Board does not support EEE!\n");
13734 return -EOPNOTSUPP;
13741 static const struct ethtool_ops tg3_ethtool_ops = {
13742 .get_settings = tg3_get_settings,
13743 .set_settings = tg3_set_settings,
13744 .get_drvinfo = tg3_get_drvinfo,
13745 .get_regs_len = tg3_get_regs_len,
13746 .get_regs = tg3_get_regs,
13747 .get_wol = tg3_get_wol,
13748 .set_wol = tg3_set_wol,
13749 .get_msglevel = tg3_get_msglevel,
13750 .set_msglevel = tg3_set_msglevel,
13751 .nway_reset = tg3_nway_reset,
13752 .get_link = ethtool_op_get_link,
13753 .get_eeprom_len = tg3_get_eeprom_len,
13754 .get_eeprom = tg3_get_eeprom,
13755 .set_eeprom = tg3_set_eeprom,
13756 .get_ringparam = tg3_get_ringparam,
13757 .set_ringparam = tg3_set_ringparam,
13758 .get_pauseparam = tg3_get_pauseparam,
13759 .set_pauseparam = tg3_set_pauseparam,
13760 .self_test = tg3_self_test,
13761 .get_strings = tg3_get_strings,
13762 .set_phys_id = tg3_set_phys_id,
13763 .get_ethtool_stats = tg3_get_ethtool_stats,
13764 .get_coalesce = tg3_get_coalesce,
13765 .set_coalesce = tg3_set_coalesce,
13766 .get_sset_count = tg3_get_sset_count,
13767 .get_rxnfc = tg3_get_rxnfc,
13768 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13769 .get_rxfh_indir = tg3_get_rxfh_indir,
13770 .set_rxfh_indir = tg3_set_rxfh_indir,
13771 .get_channels = tg3_get_channels,
13772 .set_channels = tg3_set_channels,
13773 .get_ts_info = tg3_get_ts_info,
13774 .get_eee = tg3_get_eee,
13775 .set_eee = tg3_set_eee,
13778 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13779 struct rtnl_link_stats64 *stats)
13781 struct tg3 *tp = netdev_priv(dev);
13783 spin_lock_bh(&tp->lock);
13784 if (!tp->hw_stats) {
13785 spin_unlock_bh(&tp->lock);
13786 return &tp->net_stats_prev;
13789 tg3_get_nstats(tp, stats);
13790 spin_unlock_bh(&tp->lock);
13795 static void tg3_set_rx_mode(struct net_device *dev)
13797 struct tg3 *tp = netdev_priv(dev);
13799 if (!netif_running(dev))
13802 tg3_full_lock(tp, 0);
13803 __tg3_set_rx_mode(dev);
13804 tg3_full_unlock(tp);
13807 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13810 dev->mtu = new_mtu;
13812 if (new_mtu > ETH_DATA_LEN) {
13813 if (tg3_flag(tp, 5780_CLASS)) {
13814 netdev_update_features(dev);
13815 tg3_flag_clear(tp, TSO_CAPABLE);
13817 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13820 if (tg3_flag(tp, 5780_CLASS)) {
13821 tg3_flag_set(tp, TSO_CAPABLE);
13822 netdev_update_features(dev);
13824 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13828 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13830 struct tg3 *tp = netdev_priv(dev);
13832 bool reset_phy = false;
13834 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13837 if (!netif_running(dev)) {
13838 /* We'll just catch it later when the
13841 tg3_set_mtu(dev, tp, new_mtu);
13847 tg3_netif_stop(tp);
13849 tg3_full_lock(tp, 1);
13851 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13853 tg3_set_mtu(dev, tp, new_mtu);
13855 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13856 * breaks all requests to 256 bytes.
13858 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13861 err = tg3_restart_hw(tp, reset_phy);
13864 tg3_netif_start(tp);
13866 tg3_full_unlock(tp);
13874 static const struct net_device_ops tg3_netdev_ops = {
13875 .ndo_open = tg3_open,
13876 .ndo_stop = tg3_close,
13877 .ndo_start_xmit = tg3_start_xmit,
13878 .ndo_get_stats64 = tg3_get_stats64,
13879 .ndo_validate_addr = eth_validate_addr,
13880 .ndo_set_rx_mode = tg3_set_rx_mode,
13881 .ndo_set_mac_address = tg3_set_mac_addr,
13882 .ndo_do_ioctl = tg3_ioctl,
13883 .ndo_tx_timeout = tg3_tx_timeout,
13884 .ndo_change_mtu = tg3_change_mtu,
13885 .ndo_fix_features = tg3_fix_features,
13886 .ndo_set_features = tg3_set_features,
13887 #ifdef CONFIG_NET_POLL_CONTROLLER
13888 .ndo_poll_controller = tg3_poll_controller,
13892 static void tg3_get_eeprom_size(struct tg3 *tp)
13894 u32 cursize, val, magic;
13896 tp->nvram_size = EEPROM_CHIP_SIZE;
13898 if (tg3_nvram_read(tp, 0, &magic) != 0)
13901 if ((magic != TG3_EEPROM_MAGIC) &&
13902 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13903 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13907 * Size the chip by reading offsets at increasing powers of two.
13908 * When we encounter our validation signature, we know the addressing
13909 * has wrapped around, and thus have our chip size.
13913 while (cursize < tp->nvram_size) {
13914 if (tg3_nvram_read(tp, cursize, &val) != 0)
13923 tp->nvram_size = cursize;
13926 static void tg3_get_nvram_size(struct tg3 *tp)
13930 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13933 /* Selfboot format */
13934 if (val != TG3_EEPROM_MAGIC) {
13935 tg3_get_eeprom_size(tp);
13939 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13941 /* This is confusing. We want to operate on the
13942 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13943 * call will read from NVRAM and byteswap the data
13944 * according to the byteswapping settings for all
13945 * other register accesses. This ensures the data we
13946 * want will always reside in the lower 16-bits.
13947 * However, the data in NVRAM is in LE format, which
13948 * means the data from the NVRAM read will always be
13949 * opposite the endianness of the CPU. The 16-bit
13950 * byteswap then brings the data to CPU endianness.
13952 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13956 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13959 static void tg3_get_nvram_info(struct tg3 *tp)
13963 nvcfg1 = tr32(NVRAM_CFG1);
13964 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13965 tg3_flag_set(tp, FLASH);
13967 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13968 tw32(NVRAM_CFG1, nvcfg1);
13971 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13972 tg3_flag(tp, 5780_CLASS)) {
13973 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13974 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13975 tp->nvram_jedecnum = JEDEC_ATMEL;
13976 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13977 tg3_flag_set(tp, NVRAM_BUFFERED);
13979 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13980 tp->nvram_jedecnum = JEDEC_ATMEL;
13981 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13983 case FLASH_VENDOR_ATMEL_EEPROM:
13984 tp->nvram_jedecnum = JEDEC_ATMEL;
13985 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13986 tg3_flag_set(tp, NVRAM_BUFFERED);
13988 case FLASH_VENDOR_ST:
13989 tp->nvram_jedecnum = JEDEC_ST;
13990 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13991 tg3_flag_set(tp, NVRAM_BUFFERED);
13993 case FLASH_VENDOR_SAIFUN:
13994 tp->nvram_jedecnum = JEDEC_SAIFUN;
13995 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13997 case FLASH_VENDOR_SST_SMALL:
13998 case FLASH_VENDOR_SST_LARGE:
13999 tp->nvram_jedecnum = JEDEC_SST;
14000 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14004 tp->nvram_jedecnum = JEDEC_ATMEL;
14005 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14006 tg3_flag_set(tp, NVRAM_BUFFERED);
14010 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14012 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14013 case FLASH_5752PAGE_SIZE_256:
14014 tp->nvram_pagesize = 256;
14016 case FLASH_5752PAGE_SIZE_512:
14017 tp->nvram_pagesize = 512;
14019 case FLASH_5752PAGE_SIZE_1K:
14020 tp->nvram_pagesize = 1024;
14022 case FLASH_5752PAGE_SIZE_2K:
14023 tp->nvram_pagesize = 2048;
14025 case FLASH_5752PAGE_SIZE_4K:
14026 tp->nvram_pagesize = 4096;
14028 case FLASH_5752PAGE_SIZE_264:
14029 tp->nvram_pagesize = 264;
14031 case FLASH_5752PAGE_SIZE_528:
14032 tp->nvram_pagesize = 528;
14037 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14041 nvcfg1 = tr32(NVRAM_CFG1);
14043 /* NVRAM protection for TPM */
14044 if (nvcfg1 & (1 << 27))
14045 tg3_flag_set(tp, PROTECTED_NVRAM);
14047 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14048 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14049 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14050 tp->nvram_jedecnum = JEDEC_ATMEL;
14051 tg3_flag_set(tp, NVRAM_BUFFERED);
14053 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14054 tp->nvram_jedecnum = JEDEC_ATMEL;
14055 tg3_flag_set(tp, NVRAM_BUFFERED);
14056 tg3_flag_set(tp, FLASH);
14058 case FLASH_5752VENDOR_ST_M45PE10:
14059 case FLASH_5752VENDOR_ST_M45PE20:
14060 case FLASH_5752VENDOR_ST_M45PE40:
14061 tp->nvram_jedecnum = JEDEC_ST;
14062 tg3_flag_set(tp, NVRAM_BUFFERED);
14063 tg3_flag_set(tp, FLASH);
14067 if (tg3_flag(tp, FLASH)) {
14068 tg3_nvram_get_pagesize(tp, nvcfg1);
14070 /* For eeprom, set pagesize to maximum eeprom size */
14071 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14073 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14074 tw32(NVRAM_CFG1, nvcfg1);
14078 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14080 u32 nvcfg1, protect = 0;
14082 nvcfg1 = tr32(NVRAM_CFG1);
14084 /* NVRAM protection for TPM */
14085 if (nvcfg1 & (1 << 27)) {
14086 tg3_flag_set(tp, PROTECTED_NVRAM);
14090 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14092 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14093 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14094 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14095 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14096 tp->nvram_jedecnum = JEDEC_ATMEL;
14097 tg3_flag_set(tp, NVRAM_BUFFERED);
14098 tg3_flag_set(tp, FLASH);
14099 tp->nvram_pagesize = 264;
14100 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14101 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14102 tp->nvram_size = (protect ? 0x3e200 :
14103 TG3_NVRAM_SIZE_512KB);
14104 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14105 tp->nvram_size = (protect ? 0x1f200 :
14106 TG3_NVRAM_SIZE_256KB);
14108 tp->nvram_size = (protect ? 0x1f200 :
14109 TG3_NVRAM_SIZE_128KB);
14111 case FLASH_5752VENDOR_ST_M45PE10:
14112 case FLASH_5752VENDOR_ST_M45PE20:
14113 case FLASH_5752VENDOR_ST_M45PE40:
14114 tp->nvram_jedecnum = JEDEC_ST;
14115 tg3_flag_set(tp, NVRAM_BUFFERED);
14116 tg3_flag_set(tp, FLASH);
14117 tp->nvram_pagesize = 256;
14118 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14119 tp->nvram_size = (protect ?
14120 TG3_NVRAM_SIZE_64KB :
14121 TG3_NVRAM_SIZE_128KB);
14122 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14123 tp->nvram_size = (protect ?
14124 TG3_NVRAM_SIZE_64KB :
14125 TG3_NVRAM_SIZE_256KB);
14127 tp->nvram_size = (protect ?
14128 TG3_NVRAM_SIZE_128KB :
14129 TG3_NVRAM_SIZE_512KB);
14134 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14138 nvcfg1 = tr32(NVRAM_CFG1);
14140 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14141 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14142 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14143 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14144 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14145 tp->nvram_jedecnum = JEDEC_ATMEL;
14146 tg3_flag_set(tp, NVRAM_BUFFERED);
14147 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14149 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14150 tw32(NVRAM_CFG1, nvcfg1);
14152 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14153 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14154 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14155 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14156 tp->nvram_jedecnum = JEDEC_ATMEL;
14157 tg3_flag_set(tp, NVRAM_BUFFERED);
14158 tg3_flag_set(tp, FLASH);
14159 tp->nvram_pagesize = 264;
14161 case FLASH_5752VENDOR_ST_M45PE10:
14162 case FLASH_5752VENDOR_ST_M45PE20:
14163 case FLASH_5752VENDOR_ST_M45PE40:
14164 tp->nvram_jedecnum = JEDEC_ST;
14165 tg3_flag_set(tp, NVRAM_BUFFERED);
14166 tg3_flag_set(tp, FLASH);
14167 tp->nvram_pagesize = 256;
14172 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14174 u32 nvcfg1, protect = 0;
14176 nvcfg1 = tr32(NVRAM_CFG1);
14178 /* NVRAM protection for TPM */
14179 if (nvcfg1 & (1 << 27)) {
14180 tg3_flag_set(tp, PROTECTED_NVRAM);
14184 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14186 case FLASH_5761VENDOR_ATMEL_ADB021D:
14187 case FLASH_5761VENDOR_ATMEL_ADB041D:
14188 case FLASH_5761VENDOR_ATMEL_ADB081D:
14189 case FLASH_5761VENDOR_ATMEL_ADB161D:
14190 case FLASH_5761VENDOR_ATMEL_MDB021D:
14191 case FLASH_5761VENDOR_ATMEL_MDB041D:
14192 case FLASH_5761VENDOR_ATMEL_MDB081D:
14193 case FLASH_5761VENDOR_ATMEL_MDB161D:
14194 tp->nvram_jedecnum = JEDEC_ATMEL;
14195 tg3_flag_set(tp, NVRAM_BUFFERED);
14196 tg3_flag_set(tp, FLASH);
14197 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14198 tp->nvram_pagesize = 256;
14200 case FLASH_5761VENDOR_ST_A_M45PE20:
14201 case FLASH_5761VENDOR_ST_A_M45PE40:
14202 case FLASH_5761VENDOR_ST_A_M45PE80:
14203 case FLASH_5761VENDOR_ST_A_M45PE16:
14204 case FLASH_5761VENDOR_ST_M_M45PE20:
14205 case FLASH_5761VENDOR_ST_M_M45PE40:
14206 case FLASH_5761VENDOR_ST_M_M45PE80:
14207 case FLASH_5761VENDOR_ST_M_M45PE16:
14208 tp->nvram_jedecnum = JEDEC_ST;
14209 tg3_flag_set(tp, NVRAM_BUFFERED);
14210 tg3_flag_set(tp, FLASH);
14211 tp->nvram_pagesize = 256;
14216 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14219 case FLASH_5761VENDOR_ATMEL_ADB161D:
14220 case FLASH_5761VENDOR_ATMEL_MDB161D:
14221 case FLASH_5761VENDOR_ST_A_M45PE16:
14222 case FLASH_5761VENDOR_ST_M_M45PE16:
14223 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14225 case FLASH_5761VENDOR_ATMEL_ADB081D:
14226 case FLASH_5761VENDOR_ATMEL_MDB081D:
14227 case FLASH_5761VENDOR_ST_A_M45PE80:
14228 case FLASH_5761VENDOR_ST_M_M45PE80:
14229 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14231 case FLASH_5761VENDOR_ATMEL_ADB041D:
14232 case FLASH_5761VENDOR_ATMEL_MDB041D:
14233 case FLASH_5761VENDOR_ST_A_M45PE40:
14234 case FLASH_5761VENDOR_ST_M_M45PE40:
14235 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14237 case FLASH_5761VENDOR_ATMEL_ADB021D:
14238 case FLASH_5761VENDOR_ATMEL_MDB021D:
14239 case FLASH_5761VENDOR_ST_A_M45PE20:
14240 case FLASH_5761VENDOR_ST_M_M45PE20:
14241 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14247 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14249 tp->nvram_jedecnum = JEDEC_ATMEL;
14250 tg3_flag_set(tp, NVRAM_BUFFERED);
14251 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14254 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14258 nvcfg1 = tr32(NVRAM_CFG1);
14260 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14261 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14262 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14263 tp->nvram_jedecnum = JEDEC_ATMEL;
14264 tg3_flag_set(tp, NVRAM_BUFFERED);
14265 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14267 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14268 tw32(NVRAM_CFG1, nvcfg1);
14270 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14271 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14272 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14273 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14274 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14275 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14276 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14277 tp->nvram_jedecnum = JEDEC_ATMEL;
14278 tg3_flag_set(tp, NVRAM_BUFFERED);
14279 tg3_flag_set(tp, FLASH);
14281 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14282 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14283 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14284 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14285 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14287 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14288 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14289 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14291 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14292 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14293 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14297 case FLASH_5752VENDOR_ST_M45PE10:
14298 case FLASH_5752VENDOR_ST_M45PE20:
14299 case FLASH_5752VENDOR_ST_M45PE40:
14300 tp->nvram_jedecnum = JEDEC_ST;
14301 tg3_flag_set(tp, NVRAM_BUFFERED);
14302 tg3_flag_set(tp, FLASH);
14304 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14305 case FLASH_5752VENDOR_ST_M45PE10:
14306 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14308 case FLASH_5752VENDOR_ST_M45PE20:
14309 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14311 case FLASH_5752VENDOR_ST_M45PE40:
14312 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14317 tg3_flag_set(tp, NO_NVRAM);
14321 tg3_nvram_get_pagesize(tp, nvcfg1);
14322 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14323 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14327 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14331 nvcfg1 = tr32(NVRAM_CFG1);
14333 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14334 case FLASH_5717VENDOR_ATMEL_EEPROM:
14335 case FLASH_5717VENDOR_MICRO_EEPROM:
14336 tp->nvram_jedecnum = JEDEC_ATMEL;
14337 tg3_flag_set(tp, NVRAM_BUFFERED);
14338 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14340 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14341 tw32(NVRAM_CFG1, nvcfg1);
14343 case FLASH_5717VENDOR_ATMEL_MDB011D:
14344 case FLASH_5717VENDOR_ATMEL_ADB011B:
14345 case FLASH_5717VENDOR_ATMEL_ADB011D:
14346 case FLASH_5717VENDOR_ATMEL_MDB021D:
14347 case FLASH_5717VENDOR_ATMEL_ADB021B:
14348 case FLASH_5717VENDOR_ATMEL_ADB021D:
14349 case FLASH_5717VENDOR_ATMEL_45USPT:
14350 tp->nvram_jedecnum = JEDEC_ATMEL;
14351 tg3_flag_set(tp, NVRAM_BUFFERED);
14352 tg3_flag_set(tp, FLASH);
14354 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14355 case FLASH_5717VENDOR_ATMEL_MDB021D:
14356 /* Detect size with tg3_nvram_get_size() */
14358 case FLASH_5717VENDOR_ATMEL_ADB021B:
14359 case FLASH_5717VENDOR_ATMEL_ADB021D:
14360 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14363 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14367 case FLASH_5717VENDOR_ST_M_M25PE10:
14368 case FLASH_5717VENDOR_ST_A_M25PE10:
14369 case FLASH_5717VENDOR_ST_M_M45PE10:
14370 case FLASH_5717VENDOR_ST_A_M45PE10:
14371 case FLASH_5717VENDOR_ST_M_M25PE20:
14372 case FLASH_5717VENDOR_ST_A_M25PE20:
14373 case FLASH_5717VENDOR_ST_M_M45PE20:
14374 case FLASH_5717VENDOR_ST_A_M45PE20:
14375 case FLASH_5717VENDOR_ST_25USPT:
14376 case FLASH_5717VENDOR_ST_45USPT:
14377 tp->nvram_jedecnum = JEDEC_ST;
14378 tg3_flag_set(tp, NVRAM_BUFFERED);
14379 tg3_flag_set(tp, FLASH);
14381 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14382 case FLASH_5717VENDOR_ST_M_M25PE20:
14383 case FLASH_5717VENDOR_ST_M_M45PE20:
14384 /* Detect size with tg3_nvram_get_size() */
14386 case FLASH_5717VENDOR_ST_A_M25PE20:
14387 case FLASH_5717VENDOR_ST_A_M45PE20:
14388 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14391 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14396 tg3_flag_set(tp, NO_NVRAM);
14400 tg3_nvram_get_pagesize(tp, nvcfg1);
14401 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14402 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14405 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14407 u32 nvcfg1, nvmpinstrp;
14409 nvcfg1 = tr32(NVRAM_CFG1);
14410 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14412 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14413 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14414 tg3_flag_set(tp, NO_NVRAM);
14418 switch (nvmpinstrp) {
14419 case FLASH_5762_EEPROM_HD:
14420 nvmpinstrp = FLASH_5720_EEPROM_HD;
14422 case FLASH_5762_EEPROM_LD:
14423 nvmpinstrp = FLASH_5720_EEPROM_LD;
14425 case FLASH_5720VENDOR_M_ST_M45PE20:
14426 /* This pinstrap supports multiple sizes, so force it
14427 * to read the actual size from location 0xf0.
14429 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14434 switch (nvmpinstrp) {
14435 case FLASH_5720_EEPROM_HD:
14436 case FLASH_5720_EEPROM_LD:
14437 tp->nvram_jedecnum = JEDEC_ATMEL;
14438 tg3_flag_set(tp, NVRAM_BUFFERED);
14440 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14441 tw32(NVRAM_CFG1, nvcfg1);
14442 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14443 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14445 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14447 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14448 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14449 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14450 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14451 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14452 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14453 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14454 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14455 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14456 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14457 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14458 case FLASH_5720VENDOR_ATMEL_45USPT:
14459 tp->nvram_jedecnum = JEDEC_ATMEL;
14460 tg3_flag_set(tp, NVRAM_BUFFERED);
14461 tg3_flag_set(tp, FLASH);
14463 switch (nvmpinstrp) {
14464 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14465 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14466 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14467 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14469 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14470 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14471 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14472 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14474 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14475 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14476 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14479 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14480 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14484 case FLASH_5720VENDOR_M_ST_M25PE10:
14485 case FLASH_5720VENDOR_M_ST_M45PE10:
14486 case FLASH_5720VENDOR_A_ST_M25PE10:
14487 case FLASH_5720VENDOR_A_ST_M45PE10:
14488 case FLASH_5720VENDOR_M_ST_M25PE20:
14489 case FLASH_5720VENDOR_M_ST_M45PE20:
14490 case FLASH_5720VENDOR_A_ST_M25PE20:
14491 case FLASH_5720VENDOR_A_ST_M45PE20:
14492 case FLASH_5720VENDOR_M_ST_M25PE40:
14493 case FLASH_5720VENDOR_M_ST_M45PE40:
14494 case FLASH_5720VENDOR_A_ST_M25PE40:
14495 case FLASH_5720VENDOR_A_ST_M45PE40:
14496 case FLASH_5720VENDOR_M_ST_M25PE80:
14497 case FLASH_5720VENDOR_M_ST_M45PE80:
14498 case FLASH_5720VENDOR_A_ST_M25PE80:
14499 case FLASH_5720VENDOR_A_ST_M45PE80:
14500 case FLASH_5720VENDOR_ST_25USPT:
14501 case FLASH_5720VENDOR_ST_45USPT:
14502 tp->nvram_jedecnum = JEDEC_ST;
14503 tg3_flag_set(tp, NVRAM_BUFFERED);
14504 tg3_flag_set(tp, FLASH);
14506 switch (nvmpinstrp) {
14507 case FLASH_5720VENDOR_M_ST_M25PE20:
14508 case FLASH_5720VENDOR_M_ST_M45PE20:
14509 case FLASH_5720VENDOR_A_ST_M25PE20:
14510 case FLASH_5720VENDOR_A_ST_M45PE20:
14511 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14513 case FLASH_5720VENDOR_M_ST_M25PE40:
14514 case FLASH_5720VENDOR_M_ST_M45PE40:
14515 case FLASH_5720VENDOR_A_ST_M25PE40:
14516 case FLASH_5720VENDOR_A_ST_M45PE40:
14517 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14519 case FLASH_5720VENDOR_M_ST_M25PE80:
14520 case FLASH_5720VENDOR_M_ST_M45PE80:
14521 case FLASH_5720VENDOR_A_ST_M25PE80:
14522 case FLASH_5720VENDOR_A_ST_M45PE80:
14523 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14526 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14527 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14532 tg3_flag_set(tp, NO_NVRAM);
14536 tg3_nvram_get_pagesize(tp, nvcfg1);
14537 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14538 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14540 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14543 if (tg3_nvram_read(tp, 0, &val))
14546 if (val != TG3_EEPROM_MAGIC &&
14547 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14548 tg3_flag_set(tp, NO_NVRAM);
14552 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14553 static void tg3_nvram_init(struct tg3 *tp)
14555 if (tg3_flag(tp, IS_SSB_CORE)) {
14556 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14557 tg3_flag_clear(tp, NVRAM);
14558 tg3_flag_clear(tp, NVRAM_BUFFERED);
14559 tg3_flag_set(tp, NO_NVRAM);
14563 tw32_f(GRC_EEPROM_ADDR,
14564 (EEPROM_ADDR_FSM_RESET |
14565 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14566 EEPROM_ADDR_CLKPERD_SHIFT)));
14570 /* Enable seeprom accesses. */
14571 tw32_f(GRC_LOCAL_CTRL,
14572 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14575 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14576 tg3_asic_rev(tp) != ASIC_REV_5701) {
14577 tg3_flag_set(tp, NVRAM);
14579 if (tg3_nvram_lock(tp)) {
14580 netdev_warn(tp->dev,
14581 "Cannot get nvram lock, %s failed\n",
14585 tg3_enable_nvram_access(tp);
14587 tp->nvram_size = 0;
14589 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14590 tg3_get_5752_nvram_info(tp);
14591 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14592 tg3_get_5755_nvram_info(tp);
14593 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14594 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14595 tg3_asic_rev(tp) == ASIC_REV_5785)
14596 tg3_get_5787_nvram_info(tp);
14597 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14598 tg3_get_5761_nvram_info(tp);
14599 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14600 tg3_get_5906_nvram_info(tp);
14601 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14602 tg3_flag(tp, 57765_CLASS))
14603 tg3_get_57780_nvram_info(tp);
14604 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14605 tg3_asic_rev(tp) == ASIC_REV_5719)
14606 tg3_get_5717_nvram_info(tp);
14607 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14608 tg3_asic_rev(tp) == ASIC_REV_5762)
14609 tg3_get_5720_nvram_info(tp);
14611 tg3_get_nvram_info(tp);
14613 if (tp->nvram_size == 0)
14614 tg3_get_nvram_size(tp);
14616 tg3_disable_nvram_access(tp);
14617 tg3_nvram_unlock(tp);
14620 tg3_flag_clear(tp, NVRAM);
14621 tg3_flag_clear(tp, NVRAM_BUFFERED);
14623 tg3_get_eeprom_size(tp);
14627 struct subsys_tbl_ent {
14628 u16 subsys_vendor, subsys_devid;
14632 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14633 /* Broadcom boards. */
14634 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14635 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14636 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14637 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14638 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14639 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14640 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14641 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14642 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14643 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14644 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14645 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14646 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14647 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14648 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14649 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14650 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14651 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14652 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14653 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14654 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14655 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14658 { TG3PCI_SUBVENDOR_ID_3COM,
14659 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14660 { TG3PCI_SUBVENDOR_ID_3COM,
14661 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14662 { TG3PCI_SUBVENDOR_ID_3COM,
14663 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14664 { TG3PCI_SUBVENDOR_ID_3COM,
14665 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14666 { TG3PCI_SUBVENDOR_ID_3COM,
14667 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14670 { TG3PCI_SUBVENDOR_ID_DELL,
14671 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14672 { TG3PCI_SUBVENDOR_ID_DELL,
14673 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14674 { TG3PCI_SUBVENDOR_ID_DELL,
14675 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14676 { TG3PCI_SUBVENDOR_ID_DELL,
14677 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14679 /* Compaq boards. */
14680 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14681 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14682 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14683 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14684 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14685 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14686 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14687 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14688 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14689 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14692 { TG3PCI_SUBVENDOR_ID_IBM,
14693 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14696 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14700 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14701 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14702 tp->pdev->subsystem_vendor) &&
14703 (subsys_id_to_phy_id[i].subsys_devid ==
14704 tp->pdev->subsystem_device))
14705 return &subsys_id_to_phy_id[i];
14710 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14714 tp->phy_id = TG3_PHY_ID_INVALID;
14715 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14717 /* Assume an onboard device and WOL capable by default. */
14718 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14719 tg3_flag_set(tp, WOL_CAP);
14721 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14722 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14723 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14724 tg3_flag_set(tp, IS_NIC);
14726 val = tr32(VCPU_CFGSHDW);
14727 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14728 tg3_flag_set(tp, ASPM_WORKAROUND);
14729 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14730 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14731 tg3_flag_set(tp, WOL_ENABLE);
14732 device_set_wakeup_enable(&tp->pdev->dev, true);
14737 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14738 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14739 u32 nic_cfg, led_cfg;
14740 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14741 int eeprom_phy_serdes = 0;
14743 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14744 tp->nic_sram_data_cfg = nic_cfg;
14746 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14747 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14748 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14749 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14750 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14751 (ver > 0) && (ver < 0x100))
14752 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14754 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14755 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14757 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14758 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14759 eeprom_phy_serdes = 1;
14761 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14762 if (nic_phy_id != 0) {
14763 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14764 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14766 eeprom_phy_id = (id1 >> 16) << 10;
14767 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14768 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14772 tp->phy_id = eeprom_phy_id;
14773 if (eeprom_phy_serdes) {
14774 if (!tg3_flag(tp, 5705_PLUS))
14775 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14777 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14780 if (tg3_flag(tp, 5750_PLUS))
14781 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14782 SHASTA_EXT_LED_MODE_MASK);
14784 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14788 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14789 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14792 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14793 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14796 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14797 tp->led_ctrl = LED_CTRL_MODE_MAC;
14799 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14800 * read on some older 5700/5701 bootcode.
14802 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14803 tg3_asic_rev(tp) == ASIC_REV_5701)
14804 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14808 case SHASTA_EXT_LED_SHARED:
14809 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14810 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14811 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14812 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14813 LED_CTRL_MODE_PHY_2);
14816 case SHASTA_EXT_LED_MAC:
14817 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14820 case SHASTA_EXT_LED_COMBO:
14821 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14822 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14823 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14824 LED_CTRL_MODE_PHY_2);
14829 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14830 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14831 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14832 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14834 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14835 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14837 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14838 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14839 if ((tp->pdev->subsystem_vendor ==
14840 PCI_VENDOR_ID_ARIMA) &&
14841 (tp->pdev->subsystem_device == 0x205a ||
14842 tp->pdev->subsystem_device == 0x2063))
14843 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14845 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14846 tg3_flag_set(tp, IS_NIC);
14849 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14850 tg3_flag_set(tp, ENABLE_ASF);
14851 if (tg3_flag(tp, 5750_PLUS))
14852 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14855 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14856 tg3_flag(tp, 5750_PLUS))
14857 tg3_flag_set(tp, ENABLE_APE);
14859 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14860 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14861 tg3_flag_clear(tp, WOL_CAP);
14863 if (tg3_flag(tp, WOL_CAP) &&
14864 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14865 tg3_flag_set(tp, WOL_ENABLE);
14866 device_set_wakeup_enable(&tp->pdev->dev, true);
14869 if (cfg2 & (1 << 17))
14870 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14872 /* serdes signal pre-emphasis in register 0x590 set by */
14873 /* bootcode if bit 18 is set */
14874 if (cfg2 & (1 << 18))
14875 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14877 if ((tg3_flag(tp, 57765_PLUS) ||
14878 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14879 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14880 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14881 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14883 if (tg3_flag(tp, PCI_EXPRESS)) {
14886 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14887 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
14888 !tg3_flag(tp, 57765_PLUS) &&
14889 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
14890 tg3_flag_set(tp, ASPM_WORKAROUND);
14891 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
14892 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
14893 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
14894 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
14897 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14898 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14899 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14900 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14901 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14902 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14905 if (tg3_flag(tp, WOL_CAP))
14906 device_set_wakeup_enable(&tp->pdev->dev,
14907 tg3_flag(tp, WOL_ENABLE));
14909 device_set_wakeup_capable(&tp->pdev->dev, false);
14912 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14915 u32 val2, off = offset * 8;
14917 err = tg3_nvram_lock(tp);
14921 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14922 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14923 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14924 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14927 for (i = 0; i < 100; i++) {
14928 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14929 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14930 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14936 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14938 tg3_nvram_unlock(tp);
14939 if (val2 & APE_OTP_STATUS_CMD_DONE)
14945 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14950 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14951 tw32(OTP_CTRL, cmd);
14953 /* Wait for up to 1 ms for command to execute. */
14954 for (i = 0; i < 100; i++) {
14955 val = tr32(OTP_STATUS);
14956 if (val & OTP_STATUS_CMD_DONE)
14961 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14964 /* Read the gphy configuration from the OTP region of the chip. The gphy
14965 * configuration is a 32-bit value that straddles the alignment boundary.
14966 * We do two 32-bit reads and then shift and merge the results.
14968 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14970 u32 bhalf_otp, thalf_otp;
14972 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14974 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14977 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14979 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14982 thalf_otp = tr32(OTP_READ_DATA);
14984 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14986 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14989 bhalf_otp = tr32(OTP_READ_DATA);
14991 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14994 static void tg3_phy_init_link_config(struct tg3 *tp)
14996 u32 adv = ADVERTISED_Autoneg;
14998 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14999 adv |= ADVERTISED_1000baseT_Half |
15000 ADVERTISED_1000baseT_Full;
15002 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15003 adv |= ADVERTISED_100baseT_Half |
15004 ADVERTISED_100baseT_Full |
15005 ADVERTISED_10baseT_Half |
15006 ADVERTISED_10baseT_Full |
15009 adv |= ADVERTISED_FIBRE;
15011 tp->link_config.advertising = adv;
15012 tp->link_config.speed = SPEED_UNKNOWN;
15013 tp->link_config.duplex = DUPLEX_UNKNOWN;
15014 tp->link_config.autoneg = AUTONEG_ENABLE;
15015 tp->link_config.active_speed = SPEED_UNKNOWN;
15016 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15021 static int tg3_phy_probe(struct tg3 *tp)
15023 u32 hw_phy_id_1, hw_phy_id_2;
15024 u32 hw_phy_id, hw_phy_id_masked;
15027 /* flow control autonegotiation is default behavior */
15028 tg3_flag_set(tp, PAUSE_AUTONEG);
15029 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15031 if (tg3_flag(tp, ENABLE_APE)) {
15032 switch (tp->pci_fn) {
15034 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15037 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15040 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15043 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15048 if (!tg3_flag(tp, ENABLE_ASF) &&
15049 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15050 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15051 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15052 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15054 if (tg3_flag(tp, USE_PHYLIB))
15055 return tg3_phy_init(tp);
15057 /* Reading the PHY ID register can conflict with ASF
15058 * firmware access to the PHY hardware.
15061 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15062 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15064 /* Now read the physical PHY_ID from the chip and verify
15065 * that it is sane. If it doesn't look good, we fall back
15066 * to either the hard-coded table based PHY_ID and failing
15067 * that the value found in the eeprom area.
15069 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15070 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15072 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15073 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15074 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15076 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15079 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15080 tp->phy_id = hw_phy_id;
15081 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15082 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15084 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15086 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15087 /* Do nothing, phy ID already set up in
15088 * tg3_get_eeprom_hw_cfg().
15091 struct subsys_tbl_ent *p;
15093 /* No eeprom signature? Try the hardcoded
15094 * subsys device table.
15096 p = tg3_lookup_by_subsys(tp);
15098 tp->phy_id = p->phy_id;
15099 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15100 /* For now we saw the IDs 0xbc050cd0,
15101 * 0xbc050f80 and 0xbc050c30 on devices
15102 * connected to an BCM4785 and there are
15103 * probably more. Just assume that the phy is
15104 * supported when it is connected to a SSB core
15111 tp->phy_id == TG3_PHY_ID_BCM8002)
15112 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15116 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15117 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15118 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15119 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15120 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15121 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15122 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15123 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15124 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15125 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15127 tp->eee.supported = SUPPORTED_100baseT_Full |
15128 SUPPORTED_1000baseT_Full;
15129 tp->eee.advertised = ADVERTISED_100baseT_Full |
15130 ADVERTISED_1000baseT_Full;
15131 tp->eee.eee_enabled = 1;
15132 tp->eee.tx_lpi_enabled = 1;
15133 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15136 tg3_phy_init_link_config(tp);
15138 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15139 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15140 !tg3_flag(tp, ENABLE_APE) &&
15141 !tg3_flag(tp, ENABLE_ASF)) {
15144 tg3_readphy(tp, MII_BMSR, &bmsr);
15145 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15146 (bmsr & BMSR_LSTATUS))
15147 goto skip_phy_reset;
15149 err = tg3_phy_reset(tp);
15153 tg3_phy_set_wirespeed(tp);
15155 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15156 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15157 tp->link_config.flowctrl);
15159 tg3_writephy(tp, MII_BMCR,
15160 BMCR_ANENABLE | BMCR_ANRESTART);
15165 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15166 err = tg3_init_5401phy_dsp(tp);
15170 err = tg3_init_5401phy_dsp(tp);
15176 static void tg3_read_vpd(struct tg3 *tp)
15179 unsigned int block_end, rosize, len;
15183 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15187 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15189 goto out_not_found;
15191 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15192 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15193 i += PCI_VPD_LRDT_TAG_SIZE;
15195 if (block_end > vpdlen)
15196 goto out_not_found;
15198 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15199 PCI_VPD_RO_KEYWORD_MFR_ID);
15201 len = pci_vpd_info_field_size(&vpd_data[j]);
15203 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15204 if (j + len > block_end || len != 4 ||
15205 memcmp(&vpd_data[j], "1028", 4))
15208 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15209 PCI_VPD_RO_KEYWORD_VENDOR0);
15213 len = pci_vpd_info_field_size(&vpd_data[j]);
15215 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15216 if (j + len > block_end)
15219 if (len >= sizeof(tp->fw_ver))
15220 len = sizeof(tp->fw_ver) - 1;
15221 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15222 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15227 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15228 PCI_VPD_RO_KEYWORD_PARTNO);
15230 goto out_not_found;
15232 len = pci_vpd_info_field_size(&vpd_data[i]);
15234 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15235 if (len > TG3_BPN_SIZE ||
15236 (len + i) > vpdlen)
15237 goto out_not_found;
15239 memcpy(tp->board_part_number, &vpd_data[i], len);
15243 if (tp->board_part_number[0])
15247 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15248 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15249 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15250 strcpy(tp->board_part_number, "BCM5717");
15251 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15252 strcpy(tp->board_part_number, "BCM5718");
15255 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15256 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15257 strcpy(tp->board_part_number, "BCM57780");
15258 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15259 strcpy(tp->board_part_number, "BCM57760");
15260 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15261 strcpy(tp->board_part_number, "BCM57790");
15262 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15263 strcpy(tp->board_part_number, "BCM57788");
15266 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15267 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15268 strcpy(tp->board_part_number, "BCM57761");
15269 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15270 strcpy(tp->board_part_number, "BCM57765");
15271 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15272 strcpy(tp->board_part_number, "BCM57781");
15273 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15274 strcpy(tp->board_part_number, "BCM57785");
15275 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15276 strcpy(tp->board_part_number, "BCM57791");
15277 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15278 strcpy(tp->board_part_number, "BCM57795");
15281 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15282 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15283 strcpy(tp->board_part_number, "BCM57762");
15284 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15285 strcpy(tp->board_part_number, "BCM57766");
15286 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15287 strcpy(tp->board_part_number, "BCM57782");
15288 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15289 strcpy(tp->board_part_number, "BCM57786");
15292 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15293 strcpy(tp->board_part_number, "BCM95906");
15296 strcpy(tp->board_part_number, "none");
15300 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15304 if (tg3_nvram_read(tp, offset, &val) ||
15305 (val & 0xfc000000) != 0x0c000000 ||
15306 tg3_nvram_read(tp, offset + 4, &val) ||
15313 static void tg3_read_bc_ver(struct tg3 *tp)
15315 u32 val, offset, start, ver_offset;
15317 bool newver = false;
15319 if (tg3_nvram_read(tp, 0xc, &offset) ||
15320 tg3_nvram_read(tp, 0x4, &start))
15323 offset = tg3_nvram_logical_addr(tp, offset);
15325 if (tg3_nvram_read(tp, offset, &val))
15328 if ((val & 0xfc000000) == 0x0c000000) {
15329 if (tg3_nvram_read(tp, offset + 4, &val))
15336 dst_off = strlen(tp->fw_ver);
15339 if (TG3_VER_SIZE - dst_off < 16 ||
15340 tg3_nvram_read(tp, offset + 8, &ver_offset))
15343 offset = offset + ver_offset - start;
15344 for (i = 0; i < 16; i += 4) {
15346 if (tg3_nvram_read_be32(tp, offset + i, &v))
15349 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15354 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15357 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15358 TG3_NVM_BCVER_MAJSFT;
15359 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15360 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15361 "v%d.%02d", major, minor);
15365 static void tg3_read_hwsb_ver(struct tg3 *tp)
15367 u32 val, major, minor;
15369 /* Use native endian representation */
15370 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15373 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15374 TG3_NVM_HWSB_CFG1_MAJSFT;
15375 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15376 TG3_NVM_HWSB_CFG1_MINSFT;
15378 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15381 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15383 u32 offset, major, minor, build;
15385 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15387 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15390 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15391 case TG3_EEPROM_SB_REVISION_0:
15392 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15394 case TG3_EEPROM_SB_REVISION_2:
15395 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15397 case TG3_EEPROM_SB_REVISION_3:
15398 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15400 case TG3_EEPROM_SB_REVISION_4:
15401 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15403 case TG3_EEPROM_SB_REVISION_5:
15404 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15406 case TG3_EEPROM_SB_REVISION_6:
15407 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15413 if (tg3_nvram_read(tp, offset, &val))
15416 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15417 TG3_EEPROM_SB_EDH_BLD_SHFT;
15418 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15419 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15420 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15422 if (minor > 99 || build > 26)
15425 offset = strlen(tp->fw_ver);
15426 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15427 " v%d.%02d", major, minor);
15430 offset = strlen(tp->fw_ver);
15431 if (offset < TG3_VER_SIZE - 1)
15432 tp->fw_ver[offset] = 'a' + build - 1;
15436 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15438 u32 val, offset, start;
15441 for (offset = TG3_NVM_DIR_START;
15442 offset < TG3_NVM_DIR_END;
15443 offset += TG3_NVM_DIRENT_SIZE) {
15444 if (tg3_nvram_read(tp, offset, &val))
15447 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15451 if (offset == TG3_NVM_DIR_END)
15454 if (!tg3_flag(tp, 5705_PLUS))
15455 start = 0x08000000;
15456 else if (tg3_nvram_read(tp, offset - 4, &start))
15459 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15460 !tg3_fw_img_is_valid(tp, offset) ||
15461 tg3_nvram_read(tp, offset + 8, &val))
15464 offset += val - start;
15466 vlen = strlen(tp->fw_ver);
15468 tp->fw_ver[vlen++] = ',';
15469 tp->fw_ver[vlen++] = ' ';
15471 for (i = 0; i < 4; i++) {
15473 if (tg3_nvram_read_be32(tp, offset, &v))
15476 offset += sizeof(v);
15478 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15479 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15483 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15488 static void tg3_probe_ncsi(struct tg3 *tp)
15492 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15493 if (apedata != APE_SEG_SIG_MAGIC)
15496 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15497 if (!(apedata & APE_FW_STATUS_READY))
15500 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15501 tg3_flag_set(tp, APE_HAS_NCSI);
15504 static void tg3_read_dash_ver(struct tg3 *tp)
15510 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15512 if (tg3_flag(tp, APE_HAS_NCSI))
15514 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15519 vlen = strlen(tp->fw_ver);
15521 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15523 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15524 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15525 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15526 (apedata & APE_FW_VERSION_BLDMSK));
15529 static void tg3_read_otp_ver(struct tg3 *tp)
15533 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15536 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15537 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15538 TG3_OTP_MAGIC0_VALID(val)) {
15539 u64 val64 = (u64) val << 32 | val2;
15543 for (i = 0; i < 7; i++) {
15544 if ((val64 & 0xff) == 0)
15546 ver = val64 & 0xff;
15549 vlen = strlen(tp->fw_ver);
15550 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15554 static void tg3_read_fw_ver(struct tg3 *tp)
15557 bool vpd_vers = false;
15559 if (tp->fw_ver[0] != 0)
15562 if (tg3_flag(tp, NO_NVRAM)) {
15563 strcat(tp->fw_ver, "sb");
15564 tg3_read_otp_ver(tp);
15568 if (tg3_nvram_read(tp, 0, &val))
15571 if (val == TG3_EEPROM_MAGIC)
15572 tg3_read_bc_ver(tp);
15573 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15574 tg3_read_sb_ver(tp, val);
15575 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15576 tg3_read_hwsb_ver(tp);
15578 if (tg3_flag(tp, ENABLE_ASF)) {
15579 if (tg3_flag(tp, ENABLE_APE)) {
15580 tg3_probe_ncsi(tp);
15582 tg3_read_dash_ver(tp);
15583 } else if (!vpd_vers) {
15584 tg3_read_mgmtfw_ver(tp);
15588 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15591 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15593 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15594 return TG3_RX_RET_MAX_SIZE_5717;
15595 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15596 return TG3_RX_RET_MAX_SIZE_5700;
15598 return TG3_RX_RET_MAX_SIZE_5705;
15601 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15602 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15603 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15604 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15608 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15610 struct pci_dev *peer;
15611 unsigned int func, devnr = tp->pdev->devfn & ~7;
15613 for (func = 0; func < 8; func++) {
15614 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15615 if (peer && peer != tp->pdev)
15619 /* 5704 can be configured in single-port mode, set peer to
15620 * tp->pdev in that case.
15628 * We don't need to keep the refcount elevated; there's no way
15629 * to remove one half of this device without removing the other
15636 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15638 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15639 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15642 /* All devices that use the alternate
15643 * ASIC REV location have a CPMU.
15645 tg3_flag_set(tp, CPMU_PRESENT);
15647 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15648 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15649 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15650 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15651 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15652 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15653 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15654 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15655 reg = TG3PCI_GEN2_PRODID_ASICREV;
15656 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15657 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15658 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15659 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15660 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15661 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15662 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15663 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15664 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15665 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15666 reg = TG3PCI_GEN15_PRODID_ASICREV;
15668 reg = TG3PCI_PRODID_ASICREV;
15670 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15673 /* Wrong chip ID in 5752 A0. This code can be removed later
15674 * as A0 is not in production.
15676 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15677 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15679 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15680 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15682 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15683 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15684 tg3_asic_rev(tp) == ASIC_REV_5720)
15685 tg3_flag_set(tp, 5717_PLUS);
15687 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15688 tg3_asic_rev(tp) == ASIC_REV_57766)
15689 tg3_flag_set(tp, 57765_CLASS);
15691 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15692 tg3_asic_rev(tp) == ASIC_REV_5762)
15693 tg3_flag_set(tp, 57765_PLUS);
15695 /* Intentionally exclude ASIC_REV_5906 */
15696 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15697 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15698 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15699 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15700 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15701 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15702 tg3_flag(tp, 57765_PLUS))
15703 tg3_flag_set(tp, 5755_PLUS);
15705 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15706 tg3_asic_rev(tp) == ASIC_REV_5714)
15707 tg3_flag_set(tp, 5780_CLASS);
15709 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15710 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15711 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15712 tg3_flag(tp, 5755_PLUS) ||
15713 tg3_flag(tp, 5780_CLASS))
15714 tg3_flag_set(tp, 5750_PLUS);
15716 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15717 tg3_flag(tp, 5750_PLUS))
15718 tg3_flag_set(tp, 5705_PLUS);
15721 static bool tg3_10_100_only_device(struct tg3 *tp,
15722 const struct pci_device_id *ent)
15724 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15726 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15727 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15728 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15731 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15732 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15733 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15743 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15746 u32 pci_state_reg, grc_misc_cfg;
15751 /* Force memory write invalidate off. If we leave it on,
15752 * then on 5700_BX chips we have to enable a workaround.
15753 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15754 * to match the cacheline size. The Broadcom driver have this
15755 * workaround but turns MWI off all the times so never uses
15756 * it. This seems to suggest that the workaround is insufficient.
15758 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15759 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15760 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15762 /* Important! -- Make sure register accesses are byteswapped
15763 * correctly. Also, for those chips that require it, make
15764 * sure that indirect register accesses are enabled before
15765 * the first operation.
15767 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15769 tp->misc_host_ctrl |= (misc_ctrl_reg &
15770 MISC_HOST_CTRL_CHIPREV);
15771 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15772 tp->misc_host_ctrl);
15774 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15776 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15777 * we need to disable memory and use config. cycles
15778 * only to access all registers. The 5702/03 chips
15779 * can mistakenly decode the special cycles from the
15780 * ICH chipsets as memory write cycles, causing corruption
15781 * of register and memory space. Only certain ICH bridges
15782 * will drive special cycles with non-zero data during the
15783 * address phase which can fall within the 5703's address
15784 * range. This is not an ICH bug as the PCI spec allows
15785 * non-zero address during special cycles. However, only
15786 * these ICH bridges are known to drive non-zero addresses
15787 * during special cycles.
15789 * Since special cycles do not cross PCI bridges, we only
15790 * enable this workaround if the 5703 is on the secondary
15791 * bus of these ICH bridges.
15793 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15794 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15795 static struct tg3_dev_id {
15799 } ich_chipsets[] = {
15800 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15802 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15804 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15806 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15810 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15811 struct pci_dev *bridge = NULL;
15813 while (pci_id->vendor != 0) {
15814 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15820 if (pci_id->rev != PCI_ANY_ID) {
15821 if (bridge->revision > pci_id->rev)
15824 if (bridge->subordinate &&
15825 (bridge->subordinate->number ==
15826 tp->pdev->bus->number)) {
15827 tg3_flag_set(tp, ICH_WORKAROUND);
15828 pci_dev_put(bridge);
15834 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15835 static struct tg3_dev_id {
15838 } bridge_chipsets[] = {
15839 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15840 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15843 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15844 struct pci_dev *bridge = NULL;
15846 while (pci_id->vendor != 0) {
15847 bridge = pci_get_device(pci_id->vendor,
15854 if (bridge->subordinate &&
15855 (bridge->subordinate->number <=
15856 tp->pdev->bus->number) &&
15857 (bridge->subordinate->busn_res.end >=
15858 tp->pdev->bus->number)) {
15859 tg3_flag_set(tp, 5701_DMA_BUG);
15860 pci_dev_put(bridge);
15866 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15867 * DMA addresses > 40-bit. This bridge may have other additional
15868 * 57xx devices behind it in some 4-port NIC designs for example.
15869 * Any tg3 device found behind the bridge will also need the 40-bit
15872 if (tg3_flag(tp, 5780_CLASS)) {
15873 tg3_flag_set(tp, 40BIT_DMA_BUG);
15874 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15876 struct pci_dev *bridge = NULL;
15879 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15880 PCI_DEVICE_ID_SERVERWORKS_EPB,
15882 if (bridge && bridge->subordinate &&
15883 (bridge->subordinate->number <=
15884 tp->pdev->bus->number) &&
15885 (bridge->subordinate->busn_res.end >=
15886 tp->pdev->bus->number)) {
15887 tg3_flag_set(tp, 40BIT_DMA_BUG);
15888 pci_dev_put(bridge);
15894 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15895 tg3_asic_rev(tp) == ASIC_REV_5714)
15896 tp->pdev_peer = tg3_find_peer(tp);
15898 /* Determine TSO capabilities */
15899 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15900 ; /* Do nothing. HW bug. */
15901 else if (tg3_flag(tp, 57765_PLUS))
15902 tg3_flag_set(tp, HW_TSO_3);
15903 else if (tg3_flag(tp, 5755_PLUS) ||
15904 tg3_asic_rev(tp) == ASIC_REV_5906)
15905 tg3_flag_set(tp, HW_TSO_2);
15906 else if (tg3_flag(tp, 5750_PLUS)) {
15907 tg3_flag_set(tp, HW_TSO_1);
15908 tg3_flag_set(tp, TSO_BUG);
15909 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15910 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15911 tg3_flag_clear(tp, TSO_BUG);
15912 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15913 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15914 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15915 tg3_flag_set(tp, FW_TSO);
15916 tg3_flag_set(tp, TSO_BUG);
15917 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15918 tp->fw_needed = FIRMWARE_TG3TSO5;
15920 tp->fw_needed = FIRMWARE_TG3TSO;
15923 /* Selectively allow TSO based on operating conditions */
15924 if (tg3_flag(tp, HW_TSO_1) ||
15925 tg3_flag(tp, HW_TSO_2) ||
15926 tg3_flag(tp, HW_TSO_3) ||
15927 tg3_flag(tp, FW_TSO)) {
15928 /* For firmware TSO, assume ASF is disabled.
15929 * We'll disable TSO later if we discover ASF
15930 * is enabled in tg3_get_eeprom_hw_cfg().
15932 tg3_flag_set(tp, TSO_CAPABLE);
15934 tg3_flag_clear(tp, TSO_CAPABLE);
15935 tg3_flag_clear(tp, TSO_BUG);
15936 tp->fw_needed = NULL;
15939 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15940 tp->fw_needed = FIRMWARE_TG3;
15942 if (tg3_asic_rev(tp) == ASIC_REV_57766)
15943 tp->fw_needed = FIRMWARE_TG357766;
15947 if (tg3_flag(tp, 5750_PLUS)) {
15948 tg3_flag_set(tp, SUPPORT_MSI);
15949 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15950 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15951 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15952 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15953 tp->pdev_peer == tp->pdev))
15954 tg3_flag_clear(tp, SUPPORT_MSI);
15956 if (tg3_flag(tp, 5755_PLUS) ||
15957 tg3_asic_rev(tp) == ASIC_REV_5906) {
15958 tg3_flag_set(tp, 1SHOT_MSI);
15961 if (tg3_flag(tp, 57765_PLUS)) {
15962 tg3_flag_set(tp, SUPPORT_MSIX);
15963 tp->irq_max = TG3_IRQ_MAX_VECS;
15969 if (tp->irq_max > 1) {
15970 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15971 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15973 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15974 tg3_asic_rev(tp) == ASIC_REV_5720)
15975 tp->txq_max = tp->irq_max - 1;
15978 if (tg3_flag(tp, 5755_PLUS) ||
15979 tg3_asic_rev(tp) == ASIC_REV_5906)
15980 tg3_flag_set(tp, SHORT_DMA_BUG);
15982 if (tg3_asic_rev(tp) == ASIC_REV_5719)
15983 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15985 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15986 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15987 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15988 tg3_asic_rev(tp) == ASIC_REV_5762)
15989 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15991 if (tg3_flag(tp, 57765_PLUS) &&
15992 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15993 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15995 if (!tg3_flag(tp, 5705_PLUS) ||
15996 tg3_flag(tp, 5780_CLASS) ||
15997 tg3_flag(tp, USE_JUMBO_BDFLAG))
15998 tg3_flag_set(tp, JUMBO_CAPABLE);
16000 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16003 if (pci_is_pcie(tp->pdev)) {
16006 tg3_flag_set(tp, PCI_EXPRESS);
16008 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16009 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16010 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16011 tg3_flag_clear(tp, HW_TSO_2);
16012 tg3_flag_clear(tp, TSO_CAPABLE);
16014 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16015 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16016 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16017 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16018 tg3_flag_set(tp, CLKREQ_BUG);
16019 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16020 tg3_flag_set(tp, L1PLLPD_EN);
16022 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16023 /* BCM5785 devices are effectively PCIe devices, and should
16024 * follow PCIe codepaths, but do not have a PCIe capabilities
16027 tg3_flag_set(tp, PCI_EXPRESS);
16028 } else if (!tg3_flag(tp, 5705_PLUS) ||
16029 tg3_flag(tp, 5780_CLASS)) {
16030 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16031 if (!tp->pcix_cap) {
16032 dev_err(&tp->pdev->dev,
16033 "Cannot find PCI-X capability, aborting\n");
16037 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16038 tg3_flag_set(tp, PCIX_MODE);
16041 /* If we have an AMD 762 or VIA K8T800 chipset, write
16042 * reordering to the mailbox registers done by the host
16043 * controller can cause major troubles. We read back from
16044 * every mailbox register write to force the writes to be
16045 * posted to the chip in order.
16047 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16048 !tg3_flag(tp, PCI_EXPRESS))
16049 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16051 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16052 &tp->pci_cacheline_sz);
16053 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16054 &tp->pci_lat_timer);
16055 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16056 tp->pci_lat_timer < 64) {
16057 tp->pci_lat_timer = 64;
16058 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16059 tp->pci_lat_timer);
16062 /* Important! -- It is critical that the PCI-X hw workaround
16063 * situation is decided before the first MMIO register access.
16065 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16066 /* 5700 BX chips need to have their TX producer index
16067 * mailboxes written twice to workaround a bug.
16069 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16071 /* If we are in PCI-X mode, enable register write workaround.
16073 * The workaround is to use indirect register accesses
16074 * for all chip writes not to mailbox registers.
16076 if (tg3_flag(tp, PCIX_MODE)) {
16079 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16081 /* The chip can have it's power management PCI config
16082 * space registers clobbered due to this bug.
16083 * So explicitly force the chip into D0 here.
16085 pci_read_config_dword(tp->pdev,
16086 tp->pm_cap + PCI_PM_CTRL,
16088 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16089 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16090 pci_write_config_dword(tp->pdev,
16091 tp->pm_cap + PCI_PM_CTRL,
16094 /* Also, force SERR#/PERR# in PCI command. */
16095 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16096 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16097 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16101 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16102 tg3_flag_set(tp, PCI_HIGH_SPEED);
16103 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16104 tg3_flag_set(tp, PCI_32BIT);
16106 /* Chip-specific fixup from Broadcom driver */
16107 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16108 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16109 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16110 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16113 /* Default fast path register access methods */
16114 tp->read32 = tg3_read32;
16115 tp->write32 = tg3_write32;
16116 tp->read32_mbox = tg3_read32;
16117 tp->write32_mbox = tg3_write32;
16118 tp->write32_tx_mbox = tg3_write32;
16119 tp->write32_rx_mbox = tg3_write32;
16121 /* Various workaround register access methods */
16122 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16123 tp->write32 = tg3_write_indirect_reg32;
16124 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16125 (tg3_flag(tp, PCI_EXPRESS) &&
16126 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16128 * Back to back register writes can cause problems on these
16129 * chips, the workaround is to read back all reg writes
16130 * except those to mailbox regs.
16132 * See tg3_write_indirect_reg32().
16134 tp->write32 = tg3_write_flush_reg32;
16137 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16138 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16139 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16140 tp->write32_rx_mbox = tg3_write_flush_reg32;
16143 if (tg3_flag(tp, ICH_WORKAROUND)) {
16144 tp->read32 = tg3_read_indirect_reg32;
16145 tp->write32 = tg3_write_indirect_reg32;
16146 tp->read32_mbox = tg3_read_indirect_mbox;
16147 tp->write32_mbox = tg3_write_indirect_mbox;
16148 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16149 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16154 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16155 pci_cmd &= ~PCI_COMMAND_MEMORY;
16156 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16158 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16159 tp->read32_mbox = tg3_read32_mbox_5906;
16160 tp->write32_mbox = tg3_write32_mbox_5906;
16161 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16162 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16165 if (tp->write32 == tg3_write_indirect_reg32 ||
16166 (tg3_flag(tp, PCIX_MODE) &&
16167 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16168 tg3_asic_rev(tp) == ASIC_REV_5701)))
16169 tg3_flag_set(tp, SRAM_USE_CONFIG);
16171 /* The memory arbiter has to be enabled in order for SRAM accesses
16172 * to succeed. Normally on powerup the tg3 chip firmware will make
16173 * sure it is enabled, but other entities such as system netboot
16174 * code might disable it.
16176 val = tr32(MEMARB_MODE);
16177 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16179 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16180 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16181 tg3_flag(tp, 5780_CLASS)) {
16182 if (tg3_flag(tp, PCIX_MODE)) {
16183 pci_read_config_dword(tp->pdev,
16184 tp->pcix_cap + PCI_X_STATUS,
16186 tp->pci_fn = val & 0x7;
16188 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16189 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16190 tg3_asic_rev(tp) == ASIC_REV_5720) {
16191 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16192 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16193 val = tr32(TG3_CPMU_STATUS);
16195 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16196 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16198 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16199 TG3_CPMU_STATUS_FSHFT_5719;
16202 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16203 tp->write32_tx_mbox = tg3_write_flush_reg32;
16204 tp->write32_rx_mbox = tg3_write_flush_reg32;
16207 /* Get eeprom hw config before calling tg3_set_power_state().
16208 * In particular, the TG3_FLAG_IS_NIC flag must be
16209 * determined before calling tg3_set_power_state() so that
16210 * we know whether or not to switch out of Vaux power.
16211 * When the flag is set, it means that GPIO1 is used for eeprom
16212 * write protect and also implies that it is a LOM where GPIOs
16213 * are not used to switch power.
16215 tg3_get_eeprom_hw_cfg(tp);
16217 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16218 tg3_flag_clear(tp, TSO_CAPABLE);
16219 tg3_flag_clear(tp, TSO_BUG);
16220 tp->fw_needed = NULL;
16223 if (tg3_flag(tp, ENABLE_APE)) {
16224 /* Allow reads and writes to the
16225 * APE register and memory space.
16227 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16228 PCISTATE_ALLOW_APE_SHMEM_WR |
16229 PCISTATE_ALLOW_APE_PSPACE_WR;
16230 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16233 tg3_ape_lock_init(tp);
16236 /* Set up tp->grc_local_ctrl before calling
16237 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16238 * will bring 5700's external PHY out of reset.
16239 * It is also used as eeprom write protect on LOMs.
16241 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16242 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16243 tg3_flag(tp, EEPROM_WRITE_PROT))
16244 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16245 GRC_LCLCTRL_GPIO_OUTPUT1);
16246 /* Unused GPIO3 must be driven as output on 5752 because there
16247 * are no pull-up resistors on unused GPIO pins.
16249 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16250 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16252 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16253 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16254 tg3_flag(tp, 57765_CLASS))
16255 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16257 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16258 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16259 /* Turn off the debug UART. */
16260 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16261 if (tg3_flag(tp, IS_NIC))
16262 /* Keep VMain power. */
16263 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16264 GRC_LCLCTRL_GPIO_OUTPUT0;
16267 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16268 tp->grc_local_ctrl |=
16269 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16271 /* Switch out of Vaux if it is a NIC */
16272 tg3_pwrsrc_switch_to_vmain(tp);
16274 /* Derive initial jumbo mode from MTU assigned in
16275 * ether_setup() via the alloc_etherdev() call
16277 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16278 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16280 /* Determine WakeOnLan speed to use. */
16281 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16282 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16283 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16284 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16285 tg3_flag_clear(tp, WOL_SPEED_100MB);
16287 tg3_flag_set(tp, WOL_SPEED_100MB);
16290 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16291 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16293 /* A few boards don't want Ethernet@WireSpeed phy feature */
16294 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16295 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16296 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16297 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16298 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16299 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16300 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16302 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16303 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16304 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16305 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16306 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16308 if (tg3_flag(tp, 5705_PLUS) &&
16309 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16310 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16311 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16312 !tg3_flag(tp, 57765_PLUS)) {
16313 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16314 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16315 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16316 tg3_asic_rev(tp) == ASIC_REV_5761) {
16317 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16318 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16319 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16320 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16321 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16323 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16326 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16327 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16328 tp->phy_otp = tg3_read_otp_phycfg(tp);
16329 if (tp->phy_otp == 0)
16330 tp->phy_otp = TG3_OTP_DEFAULT;
16333 if (tg3_flag(tp, CPMU_PRESENT))
16334 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16336 tp->mi_mode = MAC_MI_MODE_BASE;
16338 tp->coalesce_mode = 0;
16339 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16340 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16341 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16343 /* Set these bits to enable statistics workaround. */
16344 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16345 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16346 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16347 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16348 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16351 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16352 tg3_asic_rev(tp) == ASIC_REV_57780)
16353 tg3_flag_set(tp, USE_PHYLIB);
16355 err = tg3_mdio_init(tp);
16359 /* Initialize data/descriptor byte/word swapping. */
16360 val = tr32(GRC_MODE);
16361 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16362 tg3_asic_rev(tp) == ASIC_REV_5762)
16363 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16364 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16365 GRC_MODE_B2HRX_ENABLE |
16366 GRC_MODE_HTX2B_ENABLE |
16367 GRC_MODE_HOST_STACKUP);
16369 val &= GRC_MODE_HOST_STACKUP;
16371 tw32(GRC_MODE, val | tp->grc_mode);
16373 tg3_switch_clocks(tp);
16375 /* Clear this out for sanity. */
16376 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16378 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16380 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16381 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16382 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16383 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16384 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16385 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16386 void __iomem *sram_base;
16388 /* Write some dummy words into the SRAM status block
16389 * area, see if it reads back correctly. If the return
16390 * value is bad, force enable the PCIX workaround.
16392 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16394 writel(0x00000000, sram_base);
16395 writel(0x00000000, sram_base + 4);
16396 writel(0xffffffff, sram_base + 4);
16397 if (readl(sram_base) != 0x00000000)
16398 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16403 tg3_nvram_init(tp);
16405 /* If the device has an NVRAM, no need to load patch firmware */
16406 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16407 !tg3_flag(tp, NO_NVRAM))
16408 tp->fw_needed = NULL;
16410 grc_misc_cfg = tr32(GRC_MISC_CFG);
16411 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16413 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16414 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16415 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16416 tg3_flag_set(tp, IS_5788);
16418 if (!tg3_flag(tp, IS_5788) &&
16419 tg3_asic_rev(tp) != ASIC_REV_5700)
16420 tg3_flag_set(tp, TAGGED_STATUS);
16421 if (tg3_flag(tp, TAGGED_STATUS)) {
16422 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16423 HOSTCC_MODE_CLRTICK_TXBD);
16425 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16426 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16427 tp->misc_host_ctrl);
16430 /* Preserve the APE MAC_MODE bits */
16431 if (tg3_flag(tp, ENABLE_APE))
16432 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16436 if (tg3_10_100_only_device(tp, ent))
16437 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16439 err = tg3_phy_probe(tp);
16441 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16442 /* ... but do not return immediately ... */
16447 tg3_read_fw_ver(tp);
16449 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16450 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16452 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16453 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16455 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16458 /* 5700 {AX,BX} chips have a broken status block link
16459 * change bit implementation, so we must use the
16460 * status register in those cases.
16462 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16463 tg3_flag_set(tp, USE_LINKCHG_REG);
16465 tg3_flag_clear(tp, USE_LINKCHG_REG);
16467 /* The led_ctrl is set during tg3_phy_probe, here we might
16468 * have to force the link status polling mechanism based
16469 * upon subsystem IDs.
16471 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16472 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16473 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16474 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16475 tg3_flag_set(tp, USE_LINKCHG_REG);
16478 /* For all SERDES we poll the MAC status register. */
16479 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16480 tg3_flag_set(tp, POLL_SERDES);
16482 tg3_flag_clear(tp, POLL_SERDES);
16484 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16485 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16486 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16487 tg3_flag(tp, PCIX_MODE)) {
16488 tp->rx_offset = NET_SKB_PAD;
16489 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16490 tp->rx_copy_thresh = ~(u16)0;
16494 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16495 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16496 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16498 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16500 /* Increment the rx prod index on the rx std ring by at most
16501 * 8 for these chips to workaround hw errata.
16503 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16504 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16505 tg3_asic_rev(tp) == ASIC_REV_5755)
16506 tp->rx_std_max_post = 8;
16508 if (tg3_flag(tp, ASPM_WORKAROUND))
16509 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16510 PCIE_PWR_MGMT_L1_THRESH_MSK;
16515 #ifdef CONFIG_SPARC
16516 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16518 struct net_device *dev = tp->dev;
16519 struct pci_dev *pdev = tp->pdev;
16520 struct device_node *dp = pci_device_to_OF_node(pdev);
16521 const unsigned char *addr;
16524 addr = of_get_property(dp, "local-mac-address", &len);
16525 if (addr && len == 6) {
16526 memcpy(dev->dev_addr, addr, 6);
16532 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16534 struct net_device *dev = tp->dev;
16536 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16541 static int tg3_get_device_address(struct tg3 *tp)
16543 struct net_device *dev = tp->dev;
16544 u32 hi, lo, mac_offset;
16548 #ifdef CONFIG_SPARC
16549 if (!tg3_get_macaddr_sparc(tp))
16553 if (tg3_flag(tp, IS_SSB_CORE)) {
16554 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16555 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16560 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16561 tg3_flag(tp, 5780_CLASS)) {
16562 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16564 if (tg3_nvram_lock(tp))
16565 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16567 tg3_nvram_unlock(tp);
16568 } else if (tg3_flag(tp, 5717_PLUS)) {
16569 if (tp->pci_fn & 1)
16571 if (tp->pci_fn > 1)
16572 mac_offset += 0x18c;
16573 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16576 /* First try to get it from MAC address mailbox. */
16577 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16578 if ((hi >> 16) == 0x484b) {
16579 dev->dev_addr[0] = (hi >> 8) & 0xff;
16580 dev->dev_addr[1] = (hi >> 0) & 0xff;
16582 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16583 dev->dev_addr[2] = (lo >> 24) & 0xff;
16584 dev->dev_addr[3] = (lo >> 16) & 0xff;
16585 dev->dev_addr[4] = (lo >> 8) & 0xff;
16586 dev->dev_addr[5] = (lo >> 0) & 0xff;
16588 /* Some old bootcode may report a 0 MAC address in SRAM */
16589 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16592 /* Next, try NVRAM. */
16593 if (!tg3_flag(tp, NO_NVRAM) &&
16594 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16595 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16596 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16597 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16599 /* Finally just fetch it out of the MAC control regs. */
16601 hi = tr32(MAC_ADDR_0_HIGH);
16602 lo = tr32(MAC_ADDR_0_LOW);
16604 dev->dev_addr[5] = lo & 0xff;
16605 dev->dev_addr[4] = (lo >> 8) & 0xff;
16606 dev->dev_addr[3] = (lo >> 16) & 0xff;
16607 dev->dev_addr[2] = (lo >> 24) & 0xff;
16608 dev->dev_addr[1] = hi & 0xff;
16609 dev->dev_addr[0] = (hi >> 8) & 0xff;
16613 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16614 #ifdef CONFIG_SPARC
16615 if (!tg3_get_default_macaddr_sparc(tp))
16623 #define BOUNDARY_SINGLE_CACHELINE 1
16624 #define BOUNDARY_MULTI_CACHELINE 2
16626 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16628 int cacheline_size;
16632 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16634 cacheline_size = 1024;
16636 cacheline_size = (int) byte * 4;
16638 /* On 5703 and later chips, the boundary bits have no
16641 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16642 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16643 !tg3_flag(tp, PCI_EXPRESS))
16646 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16647 goal = BOUNDARY_MULTI_CACHELINE;
16649 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16650 goal = BOUNDARY_SINGLE_CACHELINE;
16656 if (tg3_flag(tp, 57765_PLUS)) {
16657 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16664 /* PCI controllers on most RISC systems tend to disconnect
16665 * when a device tries to burst across a cache-line boundary.
16666 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16668 * Unfortunately, for PCI-E there are only limited
16669 * write-side controls for this, and thus for reads
16670 * we will still get the disconnects. We'll also waste
16671 * these PCI cycles for both read and write for chips
16672 * other than 5700 and 5701 which do not implement the
16675 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16676 switch (cacheline_size) {
16681 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16682 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16683 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16685 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16686 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16691 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16692 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16696 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16697 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16700 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16701 switch (cacheline_size) {
16705 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16706 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16707 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16713 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16714 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16718 switch (cacheline_size) {
16720 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16721 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16722 DMA_RWCTRL_WRITE_BNDRY_16);
16727 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16728 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16729 DMA_RWCTRL_WRITE_BNDRY_32);
16734 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16735 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16736 DMA_RWCTRL_WRITE_BNDRY_64);
16741 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16742 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16743 DMA_RWCTRL_WRITE_BNDRY_128);
16748 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16749 DMA_RWCTRL_WRITE_BNDRY_256);
16752 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16753 DMA_RWCTRL_WRITE_BNDRY_512);
16757 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16758 DMA_RWCTRL_WRITE_BNDRY_1024);
16767 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16768 int size, bool to_device)
16770 struct tg3_internal_buffer_desc test_desc;
16771 u32 sram_dma_descs;
16774 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16776 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16777 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16778 tw32(RDMAC_STATUS, 0);
16779 tw32(WDMAC_STATUS, 0);
16781 tw32(BUFMGR_MODE, 0);
16782 tw32(FTQ_RESET, 0);
16784 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16785 test_desc.addr_lo = buf_dma & 0xffffffff;
16786 test_desc.nic_mbuf = 0x00002100;
16787 test_desc.len = size;
16790 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
16791 * the *second* time the tg3 driver was getting loaded after an
16794 * Broadcom tells me:
16795 * ...the DMA engine is connected to the GRC block and a DMA
16796 * reset may affect the GRC block in some unpredictable way...
16797 * The behavior of resets to individual blocks has not been tested.
16799 * Broadcom noted the GRC reset will also reset all sub-components.
16802 test_desc.cqid_sqid = (13 << 8) | 2;
16804 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16807 test_desc.cqid_sqid = (16 << 8) | 7;
16809 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16812 test_desc.flags = 0x00000005;
16814 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16817 val = *(((u32 *)&test_desc) + i);
16818 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16819 sram_dma_descs + (i * sizeof(u32)));
16820 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16822 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16825 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16827 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16830 for (i = 0; i < 40; i++) {
16834 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16836 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16837 if ((val & 0xffff) == sram_dma_descs) {
16848 #define TEST_BUFFER_SIZE 0x2000
16850 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16851 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16855 static int tg3_test_dma(struct tg3 *tp)
16857 dma_addr_t buf_dma;
16858 u32 *buf, saved_dma_rwctrl;
16861 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16862 &buf_dma, GFP_KERNEL);
16868 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16869 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16871 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16873 if (tg3_flag(tp, 57765_PLUS))
16876 if (tg3_flag(tp, PCI_EXPRESS)) {
16877 /* DMA read watermark not used on PCIE */
16878 tp->dma_rwctrl |= 0x00180000;
16879 } else if (!tg3_flag(tp, PCIX_MODE)) {
16880 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16881 tg3_asic_rev(tp) == ASIC_REV_5750)
16882 tp->dma_rwctrl |= 0x003f0000;
16884 tp->dma_rwctrl |= 0x003f000f;
16886 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16887 tg3_asic_rev(tp) == ASIC_REV_5704) {
16888 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16889 u32 read_water = 0x7;
16891 /* If the 5704 is behind the EPB bridge, we can
16892 * do the less restrictive ONE_DMA workaround for
16893 * better performance.
16895 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16896 tg3_asic_rev(tp) == ASIC_REV_5704)
16897 tp->dma_rwctrl |= 0x8000;
16898 else if (ccval == 0x6 || ccval == 0x7)
16899 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16901 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16903 /* Set bit 23 to enable PCIX hw bug fix */
16905 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16906 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16908 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16909 /* 5780 always in PCIX mode */
16910 tp->dma_rwctrl |= 0x00144000;
16911 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16912 /* 5714 always in PCIX mode */
16913 tp->dma_rwctrl |= 0x00148000;
16915 tp->dma_rwctrl |= 0x001b000f;
16918 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16919 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16921 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16922 tg3_asic_rev(tp) == ASIC_REV_5704)
16923 tp->dma_rwctrl &= 0xfffffff0;
16925 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16926 tg3_asic_rev(tp) == ASIC_REV_5701) {
16927 /* Remove this if it causes problems for some boards. */
16928 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16930 /* On 5700/5701 chips, we need to set this bit.
16931 * Otherwise the chip will issue cacheline transactions
16932 * to streamable DMA memory with not all the byte
16933 * enables turned on. This is an error on several
16934 * RISC PCI controllers, in particular sparc64.
16936 * On 5703/5704 chips, this bit has been reassigned
16937 * a different meaning. In particular, it is used
16938 * on those chips to enable a PCI-X workaround.
16940 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16943 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16946 /* Unneeded, already done by tg3_get_invariants. */
16947 tg3_switch_clocks(tp);
16950 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16951 tg3_asic_rev(tp) != ASIC_REV_5701)
16954 /* It is best to perform DMA test with maximum write burst size
16955 * to expose the 5700/5701 write DMA bug.
16957 saved_dma_rwctrl = tp->dma_rwctrl;
16958 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16959 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16964 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16967 /* Send the buffer to the chip. */
16968 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
16970 dev_err(&tp->pdev->dev,
16971 "%s: Buffer write failed. err = %d\n",
16977 /* validate data reached card RAM correctly. */
16978 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16980 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16981 if (le32_to_cpu(val) != p[i]) {
16982 dev_err(&tp->pdev->dev,
16983 "%s: Buffer corrupted on device! "
16984 "(%d != %d)\n", __func__, val, i);
16985 /* ret = -ENODEV here? */
16990 /* Now read it back. */
16991 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
16993 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16994 "err = %d\n", __func__, ret);
16999 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17003 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17004 DMA_RWCTRL_WRITE_BNDRY_16) {
17005 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17006 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17007 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17010 dev_err(&tp->pdev->dev,
17011 "%s: Buffer corrupted on read back! "
17012 "(%d != %d)\n", __func__, p[i], i);
17018 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17024 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17025 DMA_RWCTRL_WRITE_BNDRY_16) {
17026 /* DMA test passed without adjusting DMA boundary,
17027 * now look for chipsets that are known to expose the
17028 * DMA bug without failing the test.
17030 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17031 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17032 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17034 /* Safe to use the calculated DMA boundary. */
17035 tp->dma_rwctrl = saved_dma_rwctrl;
17038 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17042 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17047 static void tg3_init_bufmgr_config(struct tg3 *tp)
17049 if (tg3_flag(tp, 57765_PLUS)) {
17050 tp->bufmgr_config.mbuf_read_dma_low_water =
17051 DEFAULT_MB_RDMA_LOW_WATER_5705;
17052 tp->bufmgr_config.mbuf_mac_rx_low_water =
17053 DEFAULT_MB_MACRX_LOW_WATER_57765;
17054 tp->bufmgr_config.mbuf_high_water =
17055 DEFAULT_MB_HIGH_WATER_57765;
17057 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17058 DEFAULT_MB_RDMA_LOW_WATER_5705;
17059 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17060 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17061 tp->bufmgr_config.mbuf_high_water_jumbo =
17062 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17063 } else if (tg3_flag(tp, 5705_PLUS)) {
17064 tp->bufmgr_config.mbuf_read_dma_low_water =
17065 DEFAULT_MB_RDMA_LOW_WATER_5705;
17066 tp->bufmgr_config.mbuf_mac_rx_low_water =
17067 DEFAULT_MB_MACRX_LOW_WATER_5705;
17068 tp->bufmgr_config.mbuf_high_water =
17069 DEFAULT_MB_HIGH_WATER_5705;
17070 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17071 tp->bufmgr_config.mbuf_mac_rx_low_water =
17072 DEFAULT_MB_MACRX_LOW_WATER_5906;
17073 tp->bufmgr_config.mbuf_high_water =
17074 DEFAULT_MB_HIGH_WATER_5906;
17077 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17078 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17079 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17080 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17081 tp->bufmgr_config.mbuf_high_water_jumbo =
17082 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17084 tp->bufmgr_config.mbuf_read_dma_low_water =
17085 DEFAULT_MB_RDMA_LOW_WATER;
17086 tp->bufmgr_config.mbuf_mac_rx_low_water =
17087 DEFAULT_MB_MACRX_LOW_WATER;
17088 tp->bufmgr_config.mbuf_high_water =
17089 DEFAULT_MB_HIGH_WATER;
17091 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17092 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17093 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17094 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17095 tp->bufmgr_config.mbuf_high_water_jumbo =
17096 DEFAULT_MB_HIGH_WATER_JUMBO;
17099 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17100 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17103 static char *tg3_phy_string(struct tg3 *tp)
17105 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17106 case TG3_PHY_ID_BCM5400: return "5400";
17107 case TG3_PHY_ID_BCM5401: return "5401";
17108 case TG3_PHY_ID_BCM5411: return "5411";
17109 case TG3_PHY_ID_BCM5701: return "5701";
17110 case TG3_PHY_ID_BCM5703: return "5703";
17111 case TG3_PHY_ID_BCM5704: return "5704";
17112 case TG3_PHY_ID_BCM5705: return "5705";
17113 case TG3_PHY_ID_BCM5750: return "5750";
17114 case TG3_PHY_ID_BCM5752: return "5752";
17115 case TG3_PHY_ID_BCM5714: return "5714";
17116 case TG3_PHY_ID_BCM5780: return "5780";
17117 case TG3_PHY_ID_BCM5755: return "5755";
17118 case TG3_PHY_ID_BCM5787: return "5787";
17119 case TG3_PHY_ID_BCM5784: return "5784";
17120 case TG3_PHY_ID_BCM5756: return "5722/5756";
17121 case TG3_PHY_ID_BCM5906: return "5906";
17122 case TG3_PHY_ID_BCM5761: return "5761";
17123 case TG3_PHY_ID_BCM5718C: return "5718C";
17124 case TG3_PHY_ID_BCM5718S: return "5718S";
17125 case TG3_PHY_ID_BCM57765: return "57765";
17126 case TG3_PHY_ID_BCM5719C: return "5719C";
17127 case TG3_PHY_ID_BCM5720C: return "5720C";
17128 case TG3_PHY_ID_BCM5762: return "5762C";
17129 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17130 case 0: return "serdes";
17131 default: return "unknown";
17135 static char *tg3_bus_string(struct tg3 *tp, char *str)
17137 if (tg3_flag(tp, PCI_EXPRESS)) {
17138 strcpy(str, "PCI Express");
17140 } else if (tg3_flag(tp, PCIX_MODE)) {
17141 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17143 strcpy(str, "PCIX:");
17145 if ((clock_ctrl == 7) ||
17146 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17147 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17148 strcat(str, "133MHz");
17149 else if (clock_ctrl == 0)
17150 strcat(str, "33MHz");
17151 else if (clock_ctrl == 2)
17152 strcat(str, "50MHz");
17153 else if (clock_ctrl == 4)
17154 strcat(str, "66MHz");
17155 else if (clock_ctrl == 6)
17156 strcat(str, "100MHz");
17158 strcpy(str, "PCI:");
17159 if (tg3_flag(tp, PCI_HIGH_SPEED))
17160 strcat(str, "66MHz");
17162 strcat(str, "33MHz");
17164 if (tg3_flag(tp, PCI_32BIT))
17165 strcat(str, ":32-bit");
17167 strcat(str, ":64-bit");
17171 static void tg3_init_coal(struct tg3 *tp)
17173 struct ethtool_coalesce *ec = &tp->coal;
17175 memset(ec, 0, sizeof(*ec));
17176 ec->cmd = ETHTOOL_GCOALESCE;
17177 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17178 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17179 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17180 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17181 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17182 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17183 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17184 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17185 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17187 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17188 HOSTCC_MODE_CLRTICK_TXBD)) {
17189 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17190 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17191 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17192 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17195 if (tg3_flag(tp, 5705_PLUS)) {
17196 ec->rx_coalesce_usecs_irq = 0;
17197 ec->tx_coalesce_usecs_irq = 0;
17198 ec->stats_block_coalesce_usecs = 0;
17202 static int tg3_init_one(struct pci_dev *pdev,
17203 const struct pci_device_id *ent)
17205 struct net_device *dev;
17208 u32 sndmbx, rcvmbx, intmbx;
17210 u64 dma_mask, persist_dma_mask;
17211 netdev_features_t features = 0;
17213 printk_once(KERN_INFO "%s\n", version);
17215 err = pci_enable_device(pdev);
17217 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17221 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17223 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17224 goto err_out_disable_pdev;
17227 pci_set_master(pdev);
17229 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17232 goto err_out_free_res;
17235 SET_NETDEV_DEV(dev, &pdev->dev);
17237 tp = netdev_priv(dev);
17240 tp->pm_cap = pdev->pm_cap;
17241 tp->rx_mode = TG3_DEF_RX_MODE;
17242 tp->tx_mode = TG3_DEF_TX_MODE;
17246 tp->msg_enable = tg3_debug;
17248 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17250 if (pdev_is_ssb_gige_core(pdev)) {
17251 tg3_flag_set(tp, IS_SSB_CORE);
17252 if (ssb_gige_must_flush_posted_writes(pdev))
17253 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17254 if (ssb_gige_one_dma_at_once(pdev))
17255 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17256 if (ssb_gige_have_roboswitch(pdev))
17257 tg3_flag_set(tp, ROBOSWITCH);
17258 if (ssb_gige_is_rgmii(pdev))
17259 tg3_flag_set(tp, RGMII_MODE);
17262 /* The word/byte swap controls here control register access byte
17263 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17266 tp->misc_host_ctrl =
17267 MISC_HOST_CTRL_MASK_PCI_INT |
17268 MISC_HOST_CTRL_WORD_SWAP |
17269 MISC_HOST_CTRL_INDIR_ACCESS |
17270 MISC_HOST_CTRL_PCISTATE_RW;
17272 /* The NONFRM (non-frame) byte/word swap controls take effect
17273 * on descriptor entries, anything which isn't packet data.
17275 * The StrongARM chips on the board (one for tx, one for rx)
17276 * are running in big-endian mode.
17278 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17279 GRC_MODE_WSWAP_NONFRM_DATA);
17280 #ifdef __BIG_ENDIAN
17281 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17283 spin_lock_init(&tp->lock);
17284 spin_lock_init(&tp->indirect_lock);
17285 INIT_WORK(&tp->reset_task, tg3_reset_task);
17287 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17289 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17291 goto err_out_free_dev;
17294 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17295 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17296 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17297 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17298 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17299 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17300 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17301 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17302 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17303 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17304 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17305 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17306 tg3_flag_set(tp, ENABLE_APE);
17307 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17308 if (!tp->aperegs) {
17309 dev_err(&pdev->dev,
17310 "Cannot map APE registers, aborting\n");
17312 goto err_out_iounmap;
17316 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17317 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17319 dev->ethtool_ops = &tg3_ethtool_ops;
17320 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17321 dev->netdev_ops = &tg3_netdev_ops;
17322 dev->irq = pdev->irq;
17324 err = tg3_get_invariants(tp, ent);
17326 dev_err(&pdev->dev,
17327 "Problem fetching invariants of chip, aborting\n");
17328 goto err_out_apeunmap;
17331 /* The EPB bridge inside 5714, 5715, and 5780 and any
17332 * device behind the EPB cannot support DMA addresses > 40-bit.
17333 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17334 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17335 * do DMA address check in tg3_start_xmit().
17337 if (tg3_flag(tp, IS_5788))
17338 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17339 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17340 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17341 #ifdef CONFIG_HIGHMEM
17342 dma_mask = DMA_BIT_MASK(64);
17345 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17347 /* Configure DMA attributes. */
17348 if (dma_mask > DMA_BIT_MASK(32)) {
17349 err = pci_set_dma_mask(pdev, dma_mask);
17351 features |= NETIF_F_HIGHDMA;
17352 err = pci_set_consistent_dma_mask(pdev,
17355 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17356 "DMA for consistent allocations\n");
17357 goto err_out_apeunmap;
17361 if (err || dma_mask == DMA_BIT_MASK(32)) {
17362 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17364 dev_err(&pdev->dev,
17365 "No usable DMA configuration, aborting\n");
17366 goto err_out_apeunmap;
17370 tg3_init_bufmgr_config(tp);
17372 features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17374 /* 5700 B0 chips do not support checksumming correctly due
17375 * to hardware bugs.
17377 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17378 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17380 if (tg3_flag(tp, 5755_PLUS))
17381 features |= NETIF_F_IPV6_CSUM;
17384 /* TSO is on by default on chips that support hardware TSO.
17385 * Firmware TSO on older chips gives lower performance, so it
17386 * is off by default, but can be enabled using ethtool.
17388 if ((tg3_flag(tp, HW_TSO_1) ||
17389 tg3_flag(tp, HW_TSO_2) ||
17390 tg3_flag(tp, HW_TSO_3)) &&
17391 (features & NETIF_F_IP_CSUM))
17392 features |= NETIF_F_TSO;
17393 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17394 if (features & NETIF_F_IPV6_CSUM)
17395 features |= NETIF_F_TSO6;
17396 if (tg3_flag(tp, HW_TSO_3) ||
17397 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17398 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17399 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17400 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17401 tg3_asic_rev(tp) == ASIC_REV_57780)
17402 features |= NETIF_F_TSO_ECN;
17405 dev->features |= features;
17406 dev->vlan_features |= features;
17409 * Add loopback capability only for a subset of devices that support
17410 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17411 * loopback for the remaining devices.
17413 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17414 !tg3_flag(tp, CPMU_PRESENT))
17415 /* Add the loopback capability */
17416 features |= NETIF_F_LOOPBACK;
17418 dev->hw_features |= features;
17420 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17421 !tg3_flag(tp, TSO_CAPABLE) &&
17422 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17423 tg3_flag_set(tp, MAX_RXPEND_64);
17424 tp->rx_pending = 63;
17427 err = tg3_get_device_address(tp);
17429 dev_err(&pdev->dev,
17430 "Could not obtain valid ethernet address, aborting\n");
17431 goto err_out_apeunmap;
17435 * Reset chip in case UNDI or EFI driver did not shutdown
17436 * DMA self test will enable WDMAC and we'll see (spurious)
17437 * pending DMA on the PCI bus at that point.
17439 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17440 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17441 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17442 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17445 err = tg3_test_dma(tp);
17447 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17448 goto err_out_apeunmap;
17451 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17452 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17453 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17454 for (i = 0; i < tp->irq_max; i++) {
17455 struct tg3_napi *tnapi = &tp->napi[i];
17458 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17460 tnapi->int_mbox = intmbx;
17466 tnapi->consmbox = rcvmbx;
17467 tnapi->prodmbox = sndmbx;
17470 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17472 tnapi->coal_now = HOSTCC_MODE_NOW;
17474 if (!tg3_flag(tp, SUPPORT_MSIX))
17478 * If we support MSIX, we'll be using RSS. If we're using
17479 * RSS, the first vector only handles link interrupts and the
17480 * remaining vectors handle rx and tx interrupts. Reuse the
17481 * mailbox values for the next iteration. The values we setup
17482 * above are still useful for the single vectored mode.
17497 pci_set_drvdata(pdev, dev);
17499 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17500 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17501 tg3_asic_rev(tp) == ASIC_REV_5762)
17502 tg3_flag_set(tp, PTP_CAPABLE);
17504 if (tg3_flag(tp, 5717_PLUS)) {
17505 /* Resume a low-power mode */
17506 tg3_frob_aux_power(tp, false);
17509 tg3_timer_init(tp);
17511 tg3_carrier_off(tp);
17513 err = register_netdev(dev);
17515 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17516 goto err_out_apeunmap;
17519 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17520 tp->board_part_number,
17521 tg3_chip_rev_id(tp),
17522 tg3_bus_string(tp, str),
17525 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17526 struct phy_device *phydev;
17527 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17529 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17530 phydev->drv->name, dev_name(&phydev->dev));
17534 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17535 ethtype = "10/100Base-TX";
17536 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17537 ethtype = "1000Base-SX";
17539 ethtype = "10/100/1000Base-T";
17541 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17542 "(WireSpeed[%d], EEE[%d])\n",
17543 tg3_phy_string(tp), ethtype,
17544 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17545 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17548 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17549 (dev->features & NETIF_F_RXCSUM) != 0,
17550 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17551 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17552 tg3_flag(tp, ENABLE_ASF) != 0,
17553 tg3_flag(tp, TSO_CAPABLE) != 0);
17554 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17556 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17557 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17559 pci_save_state(pdev);
17565 iounmap(tp->aperegs);
17566 tp->aperegs = NULL;
17579 pci_release_regions(pdev);
17581 err_out_disable_pdev:
17582 pci_disable_device(pdev);
17583 pci_set_drvdata(pdev, NULL);
17587 static void tg3_remove_one(struct pci_dev *pdev)
17589 struct net_device *dev = pci_get_drvdata(pdev);
17592 struct tg3 *tp = netdev_priv(dev);
17594 release_firmware(tp->fw);
17596 tg3_reset_task_cancel(tp);
17598 if (tg3_flag(tp, USE_PHYLIB)) {
17603 unregister_netdev(dev);
17605 iounmap(tp->aperegs);
17606 tp->aperegs = NULL;
17613 pci_release_regions(pdev);
17614 pci_disable_device(pdev);
17615 pci_set_drvdata(pdev, NULL);
17619 #ifdef CONFIG_PM_SLEEP
17620 static int tg3_suspend(struct device *device)
17622 struct pci_dev *pdev = to_pci_dev(device);
17623 struct net_device *dev = pci_get_drvdata(pdev);
17624 struct tg3 *tp = netdev_priv(dev);
17627 if (!netif_running(dev))
17630 tg3_reset_task_cancel(tp);
17632 tg3_netif_stop(tp);
17634 tg3_timer_stop(tp);
17636 tg3_full_lock(tp, 1);
17637 tg3_disable_ints(tp);
17638 tg3_full_unlock(tp);
17640 netif_device_detach(dev);
17642 tg3_full_lock(tp, 0);
17643 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17644 tg3_flag_clear(tp, INIT_COMPLETE);
17645 tg3_full_unlock(tp);
17647 err = tg3_power_down_prepare(tp);
17651 tg3_full_lock(tp, 0);
17653 tg3_flag_set(tp, INIT_COMPLETE);
17654 err2 = tg3_restart_hw(tp, true);
17658 tg3_timer_start(tp);
17660 netif_device_attach(dev);
17661 tg3_netif_start(tp);
17664 tg3_full_unlock(tp);
17673 static int tg3_resume(struct device *device)
17675 struct pci_dev *pdev = to_pci_dev(device);
17676 struct net_device *dev = pci_get_drvdata(pdev);
17677 struct tg3 *tp = netdev_priv(dev);
17680 if (!netif_running(dev))
17683 netif_device_attach(dev);
17685 tg3_full_lock(tp, 0);
17687 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17689 tg3_flag_set(tp, INIT_COMPLETE);
17690 err = tg3_restart_hw(tp,
17691 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17695 tg3_timer_start(tp);
17697 tg3_netif_start(tp);
17700 tg3_full_unlock(tp);
17707 #endif /* CONFIG_PM_SLEEP */
17709 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17712 * tg3_io_error_detected - called when PCI error is detected
17713 * @pdev: Pointer to PCI device
17714 * @state: The current pci connection state
17716 * This function is called after a PCI bus error affecting
17717 * this device has been detected.
17719 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17720 pci_channel_state_t state)
17722 struct net_device *netdev = pci_get_drvdata(pdev);
17723 struct tg3 *tp = netdev_priv(netdev);
17724 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17726 netdev_info(netdev, "PCI I/O error detected\n");
17730 if (!netif_running(netdev))
17735 tg3_netif_stop(tp);
17737 tg3_timer_stop(tp);
17739 /* Want to make sure that the reset task doesn't run */
17740 tg3_reset_task_cancel(tp);
17742 netif_device_detach(netdev);
17744 /* Clean up software state, even if MMIO is blocked */
17745 tg3_full_lock(tp, 0);
17746 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17747 tg3_full_unlock(tp);
17750 if (state == pci_channel_io_perm_failure)
17751 err = PCI_ERS_RESULT_DISCONNECT;
17753 pci_disable_device(pdev);
17761 * tg3_io_slot_reset - called after the pci bus has been reset.
17762 * @pdev: Pointer to PCI device
17764 * Restart the card from scratch, as if from a cold-boot.
17765 * At this point, the card has exprienced a hard reset,
17766 * followed by fixups by BIOS, and has its config space
17767 * set up identically to what it was at cold boot.
17769 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17771 struct net_device *netdev = pci_get_drvdata(pdev);
17772 struct tg3 *tp = netdev_priv(netdev);
17773 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17778 if (pci_enable_device(pdev)) {
17779 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17783 pci_set_master(pdev);
17784 pci_restore_state(pdev);
17785 pci_save_state(pdev);
17787 if (!netif_running(netdev)) {
17788 rc = PCI_ERS_RESULT_RECOVERED;
17792 err = tg3_power_up(tp);
17796 rc = PCI_ERS_RESULT_RECOVERED;
17805 * tg3_io_resume - called when traffic can start flowing again.
17806 * @pdev: Pointer to PCI device
17808 * This callback is called when the error recovery driver tells
17809 * us that its OK to resume normal operation.
17811 static void tg3_io_resume(struct pci_dev *pdev)
17813 struct net_device *netdev = pci_get_drvdata(pdev);
17814 struct tg3 *tp = netdev_priv(netdev);
17819 if (!netif_running(netdev))
17822 tg3_full_lock(tp, 0);
17823 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17824 tg3_flag_set(tp, INIT_COMPLETE);
17825 err = tg3_restart_hw(tp, true);
17827 tg3_full_unlock(tp);
17828 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17832 netif_device_attach(netdev);
17834 tg3_timer_start(tp);
17836 tg3_netif_start(tp);
17838 tg3_full_unlock(tp);
17846 static const struct pci_error_handlers tg3_err_handler = {
17847 .error_detected = tg3_io_error_detected,
17848 .slot_reset = tg3_io_slot_reset,
17849 .resume = tg3_io_resume
17852 static struct pci_driver tg3_driver = {
17853 .name = DRV_MODULE_NAME,
17854 .id_table = tg3_pci_tbl,
17855 .probe = tg3_init_one,
17856 .remove = tg3_remove_one,
17857 .err_handler = &tg3_err_handler,
17858 .driver.pm = &tg3_pm_ops,
17861 module_pci_driver(tg3_driver);