/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
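
/*
 * Editorial example (not part of the original source): the
 * token-pasting wrappers let call sites name a flag without its
 * TG3_FLAG_ prefix.  For example,
 *
 *	if (tg3_flag(tp, TAGGED_STATUS))
 *		tg3_flag_clear(tp, TAGGED_STATUS);
 *
 * expands to _tg3_flag(TG3_FLAG_TAGGED_STATUS, (tp)->tg3_flags) and
 * _tg3_flag_clear(TG3_FLAG_TAGGED_STATUS, (tp)->tg3_flags).
 */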
#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU		ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
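
/*
 * Editorial example (not part of the original source): because
 * TG3_TX_RING_SIZE is a compile-time power of two, the compiler can
 * lower 'entry % TG3_TX_RING_SIZE' to 'entry & 511', which is exactly
 * the mask form NEXT_TX() spells out by hand for the wrap-around case.
 */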
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
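
/*
 * Editorial note (assumption about typical configurations): on
 * architectures that select CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS,
 * such as x86, TG3_RX_COPY_THRESH(tp) collapses to the constant 256
 * at compile time, so the 5701 double-copy decision adds no extra
 * load of tp->rx_copy_thresh in the RX fast path.
 */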
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
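
/*
 * Editorial example (hypothetical invocation, not from the original
 * source): the debug mask can be set at load time, e.g.
 *
 *	modprobe tg3 tg3_debug=0x0007
 *
 * which would enable the NETIF_MSG_DRV, NETIF_MSG_PROBE and
 * NETIF_MSG_LINK message classes; the default of -1 keeps the
 * TG3_DEF_MSG_ENABLE mask defined above.
 */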
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
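
/*
 * Editorial example (grounded in the comment above): callers that
 * toggle GPIO power go through the waiting variant, e.g.
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 *
 * so the register is not read back before the power switch settles.
 */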
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
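
/*
 * Editorial note: both helpers above tunnel NIC SRAM accesses through
 * the TG3PCI_MEM_WIN_BASE_ADDR/TG3PCI_MEM_WIN_DATA window (via PCI
 * config space when SRAM_USE_CONFIG is set), and always park the
 * window back at offset zero so no other code path ever observes a
 * stale base address.
 */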
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't have any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
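
/*
 * Editorial example (illustrative lock scope): tg3_ape_lock() returns
 * 0 on success, so the usual pattern brackets shared-memory access as
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	...access APE shared memory...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * which is exactly how tg3_ape_event_lock() below uses the pair.
 */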
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 milliseconds for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check if the heartbeat interval has elapsed */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
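
/*
 * Editorial example (illustrative): a caller polling link status over
 * the MI interface would do
 *
 *	u32 bmsr;
 *
 *	if (!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS))
 *		...link is up...
 *
 * since a return of 0 means the MI transaction completed and *val
 * holds the register data.
 */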
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
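
/*
 * Editorial note: the two helpers above implement IEEE 802.3 Clause 45
 * (MMD) register access tunneled through Clause 22 registers: select
 * the device address, latch the register address, switch the control
 * register to data/no-post-increment mode, then move the data word
 * through MII_TG3_MMD_ADDRESS.
 */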
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		fallthrough;
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500
/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
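
/*
 * Editorial example: with 1000BASE-X autonegotiation, both ends
 * advertising 1000XPAUSE resolves to symmetric flow control.  If only
 * the ASYM bits match on both sides, whichever end also set
 * 1000XPAUSE determines the single enabled direction, as implemented
 * above.
 */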
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev_name(phydev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phy_set_max_speed(phydev, SPEED_1000);
			phy_support_asym_pause(phydev);
			break;
		}
		fallthrough;
	case PHY_INTERFACE_MODE_MII:
		phy_set_max_speed(phydev, SPEED_100);
		phy_support_asym_pause(phydev);
		break;
	default:
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phy_attached_info(phydev);

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		ethtool_convert_legacy_u32_to_link_mode(
			phydev->advertising, tp->link_config.advertising);
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);


	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2290 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2295 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2298 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2300 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2301 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2304 static void tg3_phy_apply_otp(struct tg3 *tp)
2313 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2316 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2317 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2318 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2320 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2321 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2322 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2324 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2325 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2326 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2328 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2329 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2331 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2332 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2334 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2335 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2336 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2338 tg3_phy_toggle_auxctl_smdsp(tp, false);
2341 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2344 struct ethtool_eee *dest = &tp->eee;
2346 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2352 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2355 /* Pull eee_active */
2356 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2357 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2358 dest->eee_active = 1;
2360 dest->eee_active = 0;
2362 /* Pull lp advertised settings */
2363 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2365 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2367 /* Pull advertised and eee_enabled settings */
2368 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2370 dest->eee_enabled = !!val;
2371 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2373 /* Pull tx_lpi_enabled */
2374 val = tr32(TG3_CPMU_EEE_MODE);
2375 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2377 /* Pull lpi timer value */
2378 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2381 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2385 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2390 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2392 tp->link_config.active_duplex == DUPLEX_FULL &&
2393 (tp->link_config.active_speed == SPEED_100 ||
2394 tp->link_config.active_speed == SPEED_1000)) {
2397 if (tp->link_config.active_speed == SPEED_1000)
2398 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2400 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2402 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2404 tg3_eee_pull_config(tp, NULL);
2405 if (tp->eee.eee_active)
2409 if (!tp->setlpicnt) {
2410 if (current_link_up &&
2411 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2412 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2413 tg3_phy_toggle_auxctl_smdsp(tp, false);
2416 val = tr32(TG3_CPMU_EEE_MODE);
2417 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2421 static void tg3_phy_eee_enable(struct tg3 *tp)
2425 if (tp->link_config.active_speed == SPEED_1000 &&
2426 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2427 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2428 tg3_flag(tp, 57765_CLASS)) &&
2429 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2430 val = MII_TG3_DSP_TAP26_ALNOKO |
2431 MII_TG3_DSP_TAP26_RMRXSTO;
2432 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2433 tg3_phy_toggle_auxctl_smdsp(tp, false);
2436 val = tr32(TG3_CPMU_EEE_MODE);
2437 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2440 static int tg3_wait_macro_done(struct tg3 *tp)
2447 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2448 if ((tmp32 & 0x1000) == 0)
2458 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2460 static const u32 test_pat[4][6] = {
2461 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2462 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2463 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2464 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2468 for (chan = 0; chan < 4; chan++) {
2471 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2472 (chan * 0x2000) | 0x0200);
2473 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2475 for (i = 0; i < 6; i++)
2476 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2479 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2480 if (tg3_wait_macro_done(tp)) {
2485 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2486 (chan * 0x2000) | 0x0200);
2487 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2488 if (tg3_wait_macro_done(tp)) {
2493 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2494 if (tg3_wait_macro_done(tp)) {
2499 for (i = 0; i < 6; i += 2) {
2502 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2503 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2504 tg3_wait_macro_done(tp)) {
2510 if (low != test_pat[chan][i] ||
2511 high != test_pat[chan][i+1]) {
2512 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2513 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2514 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2524 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2528 for (chan = 0; chan < 4; chan++) {
2531 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2532 (chan * 0x2000) | 0x0200);
2533 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2534 for (i = 0; i < 6; i++)
2535 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2536 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2537 if (tg3_wait_macro_done(tp))
2544 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2546 u32 reg32, phy9_orig;
2547 int retries, do_phy_reset, err;
2553 err = tg3_bmcr_reset(tp);
2559 /* Disable transmitter and interrupt. */
2560 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2564 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2566 /* Set full-duplex, 1000 Mbps. */
2567 tg3_writephy(tp, MII_BMCR,
2568 BMCR_FULLDPLX | BMCR_SPEED1000);
2570 /* Set to master mode. */
2571 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2574 tg3_writephy(tp, MII_CTRL1000,
2575 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2577 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2581 /* Block the PHY control access. */
2582 tg3_phydsp_write(tp, 0x8005, 0x0800);
2584 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2587 } while (--retries);
2589 err = tg3_phy_reset_chanpat(tp);
2593 tg3_phydsp_write(tp, 0x8005, 0x0000);
2595 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2596 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2598 tg3_phy_toggle_auxctl_smdsp(tp, false);
2600 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2602 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2607 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2612 static void tg3_carrier_off(struct tg3 *tp)
2614 netif_carrier_off(tp->dev);
2615 tp->link_up = false;
2618 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2620 if (tg3_flag(tp, ENABLE_ASF))
2621 netdev_warn(tp->dev,
2622 "Management side-band traffic will be interrupted during phy settings change\n");
2625 /* This will reset the tigon3 PHY if there is no valid
2626 * link unless the FORCE argument is non-zero.
2628 static int tg3_phy_reset(struct tg3 *tp)
2633 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2634 val = tr32(GRC_MISC_CFG);
2635 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2638 err = tg3_readphy(tp, MII_BMSR, &val);
2639 err |= tg3_readphy(tp, MII_BMSR, &val);
2643 if (netif_running(tp->dev) && tp->link_up) {
2644 netif_carrier_off(tp->dev);
2645 tg3_link_report(tp);
2648 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2649 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2650 tg3_asic_rev(tp) == ASIC_REV_5705) {
2651 err = tg3_phy_reset_5703_4_5(tp);
2658 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2659 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2660 cpmuctrl = tr32(TG3_CPMU_CTRL);
2661 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2663 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2666 err = tg3_bmcr_reset(tp);
2670 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2671 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2672 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2674 tw32(TG3_CPMU_CTRL, cpmuctrl);
2677 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2678 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2679 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2680 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2681 CPMU_LSPD_1000MB_MACCLK_12_5) {
2682 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2684 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2688 if (tg3_flag(tp, 5717_PLUS) &&
2689 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2692 tg3_phy_apply_otp(tp);
2694 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2695 tg3_phy_toggle_apd(tp, true);
2697 tg3_phy_toggle_apd(tp, false);
2700 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2701 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2702 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2703 tg3_phydsp_write(tp, 0x000a, 0x0323);
2704 tg3_phy_toggle_auxctl_smdsp(tp, false);
2707 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2708 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2709 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2712 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2713 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2714 tg3_phydsp_write(tp, 0x000a, 0x310b);
2715 tg3_phydsp_write(tp, 0x201f, 0x9506);
2716 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2717 tg3_phy_toggle_auxctl_smdsp(tp, false);
2719 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2720 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2721 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2722 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2723 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2724 tg3_writephy(tp, MII_TG3_TEST1,
2725 MII_TG3_TEST1_TRIM_EN | 0x4);
2727 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2729 tg3_phy_toggle_auxctl_smdsp(tp, false);
2733 /* Set Extended packet length bit (bit 14) on all chips that
2734 * support jumbo frames. */
2735 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2736 /* Cannot do read-modify-write on 5401 */
2737 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2738 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2739 /* Set bit 14 with read-modify-write to preserve other bits */
2740 err = tg3_phy_auxctl_read(tp,
2741 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2743 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2744 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2747 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2748 * jumbo frame transmission.
2750 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2751 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2752 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2753 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2756 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2757 /* adjust output voltage */
2758 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2761 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2762 tg3_phydsp_write(tp, 0xffb, 0x4000);
2764 tg3_phy_toggle_automdix(tp, true);
2765 tg3_phy_set_wirespeed(tp);
2769 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2770 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2771 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2772 TG3_GPIO_MSG_NEED_VAUX)
2773 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2774 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2775 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2776 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2777 (TG3_GPIO_MSG_DRVR_PRES << 12))
2779 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2780 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2781 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2782 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2783 (TG3_GPIO_MSG_NEED_VAUX << 12))
2785 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2789 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2790 tg3_asic_rev(tp) == ASIC_REV_5719)
2791 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2793 status = tr32(TG3_CPMU_DRV_STATUS);
2795 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2796 status &= ~(TG3_GPIO_MSG_MASK << shift);
2797 status |= (newstat << shift);
2799 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2800 tg3_asic_rev(tp) == ASIC_REV_5719)
2801 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2803 tw32(TG3_CPMU_DRV_STATUS, status);
2805 return status >> TG3_APE_GPIO_MSG_SHIFT;
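
/* A minimal decoding sketch (illustrative, not part of the driver):
 * each PCI function owns a 4-bit slot in the GPIO message word, of
 * which the low two bits carry TG3_GPIO_MSG_DRVR_PRES and
 * TG3_GPIO_MSG_NEED_VAUX. The helper name and "fn" parameter are
 * hypothetical; the driver itself indexes with tp->pci_fn as above.
 */
static inline u32 tg3_get_function_status_example(u32 status, u32 fn)
{
	return (status >> (TG3_APE_GPIO_MSG_SHIFT + 4 * fn)) &
	       TG3_GPIO_MSG_MASK;
}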
2808 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2810 if (!tg3_flag(tp, IS_NIC))
2813 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2814 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2815 tg3_asic_rev(tp) == ASIC_REV_5720) {
2816 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2819 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2821 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2822 TG3_GRC_LCLCTL_PWRSW_DELAY);
2824 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2826 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2827 TG3_GRC_LCLCTL_PWRSW_DELAY);
2833 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2837 if (!tg3_flag(tp, IS_NIC) ||
2838 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2839 tg3_asic_rev(tp) == ASIC_REV_5701)
2842 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2844 tw32_wait_f(GRC_LOCAL_CTRL,
2845 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2846 TG3_GRC_LCLCTL_PWRSW_DELAY);
2848 tw32_wait_f(GRC_LOCAL_CTRL,
2850 TG3_GRC_LCLCTL_PWRSW_DELAY);
2852 tw32_wait_f(GRC_LOCAL_CTRL,
2853 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2854 TG3_GRC_LCLCTL_PWRSW_DELAY);
2857 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2859 if (!tg3_flag(tp, IS_NIC))
2862 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2863 tg3_asic_rev(tp) == ASIC_REV_5701) {
2864 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2865 (GRC_LCLCTRL_GPIO_OE0 |
2866 GRC_LCLCTRL_GPIO_OE1 |
2867 GRC_LCLCTRL_GPIO_OE2 |
2868 GRC_LCLCTRL_GPIO_OUTPUT0 |
2869 GRC_LCLCTRL_GPIO_OUTPUT1),
2870 TG3_GRC_LCLCTL_PWRSW_DELAY);
2871 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2872 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2873 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2874 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2875 GRC_LCLCTRL_GPIO_OE1 |
2876 GRC_LCLCTRL_GPIO_OE2 |
2877 GRC_LCLCTRL_GPIO_OUTPUT0 |
2878 GRC_LCLCTRL_GPIO_OUTPUT1 |
2880 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2881 TG3_GRC_LCLCTL_PWRSW_DELAY);
2883 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2884 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2885 TG3_GRC_LCLCTL_PWRSW_DELAY);
2887 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2888 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2889 TG3_GRC_LCLCTL_PWRSW_DELAY);
2892 u32 grc_local_ctrl = 0;
2894 /* Workaround to prevent overdrawing Amps. */
2895 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2896 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2897 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2899 TG3_GRC_LCLCTL_PWRSW_DELAY);
2902 /* On 5753 and variants, GPIO2 cannot be used. */
2903 no_gpio2 = tp->nic_sram_data_cfg &
2904 NIC_SRAM_DATA_CFG_NO_GPIO2;
2906 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2907 GRC_LCLCTRL_GPIO_OE1 |
2908 GRC_LCLCTRL_GPIO_OE2 |
2909 GRC_LCLCTRL_GPIO_OUTPUT1 |
2910 GRC_LCLCTRL_GPIO_OUTPUT2;
2912 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2913 GRC_LCLCTRL_GPIO_OUTPUT2);
2915 tw32_wait_f(GRC_LOCAL_CTRL,
2916 tp->grc_local_ctrl | grc_local_ctrl,
2917 TG3_GRC_LCLCTL_PWRSW_DELAY);
2919 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2921 tw32_wait_f(GRC_LOCAL_CTRL,
2922 tp->grc_local_ctrl | grc_local_ctrl,
2923 TG3_GRC_LCLCTL_PWRSW_DELAY);
2926 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2927 tw32_wait_f(GRC_LOCAL_CTRL,
2928 tp->grc_local_ctrl | grc_local_ctrl,
2929 TG3_GRC_LCLCTL_PWRSW_DELAY);
2934 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2938 /* Serialize power state transitions */
2939 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2942 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2943 msg = TG3_GPIO_MSG_NEED_VAUX;
2945 msg = tg3_set_function_status(tp, msg);
2947 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2950 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2951 tg3_pwrsrc_switch_to_vaux(tp);
2953 tg3_pwrsrc_die_with_vmain(tp);
2956 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2959 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2961 bool need_vaux = false;
2963 /* The GPIOs do something completely different on 57765. */
2964 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2967 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2968 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2969 tg3_asic_rev(tp) == ASIC_REV_5720) {
2970 tg3_frob_aux_power_5717(tp, include_wol ?
2971 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2975 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2976 struct net_device *dev_peer;
2978 dev_peer = pci_get_drvdata(tp->pdev_peer);
2980 /* remove_one() may have been run on the peer. */
2982 struct tg3 *tp_peer = netdev_priv(dev_peer);
2984 if (tg3_flag(tp_peer, INIT_COMPLETE))
2987 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2988 tg3_flag(tp_peer, ENABLE_ASF))
2993 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2994 tg3_flag(tp, ENABLE_ASF))
2998 tg3_pwrsrc_switch_to_vaux(tp);
3000 tg3_pwrsrc_die_with_vmain(tp);
3003 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3005 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3007 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3008 if (speed != SPEED_10)
3010 } else if (speed == SPEED_10)
3016 static bool tg3_phy_power_bug(struct tg3 *tp)
3018 switch (tg3_asic_rev(tp)) {
3023 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3032 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3041 static bool tg3_phy_led_bug(struct tg3 *tp)
3043 switch (tg3_asic_rev(tp)) {
3046 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3055 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3059 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3062 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3063 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3064 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3065 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3068 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3069 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3070 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3075 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3077 val = tr32(GRC_MISC_CFG);
3078 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3081 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3083 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3086 tg3_writephy(tp, MII_ADVERTISE, 0);
3087 tg3_writephy(tp, MII_BMCR,
3088 BMCR_ANENABLE | BMCR_ANRESTART);
3090 tg3_writephy(tp, MII_TG3_FET_TEST,
3091 phytest | MII_TG3_FET_SHADOW_EN);
3092 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3093 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3095 MII_TG3_FET_SHDW_AUXMODE4,
3098 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3101 } else if (do_low_power) {
3102 if (!tg3_phy_led_bug(tp))
3103 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3104 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3106 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3107 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3108 MII_TG3_AUXCTL_PCTL_VREG_11V;
3109 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3112 /* The PHY should not be powered down on some chips because of bugs. */
3115 if (tg3_phy_power_bug(tp))
3118 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3119 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3120 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3121 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3122 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3123 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3126 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3129 /* tp->lock is held. */
3130 static int tg3_nvram_lock(struct tg3 *tp)
3132 if (tg3_flag(tp, NVRAM)) {
3135 if (tp->nvram_lock_cnt == 0) {
3136 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3137 for (i = 0; i < 8000; i++) {
3138 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3143 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3147 tp->nvram_lock_cnt++;
3152 /* tp->lock is held. */
3153 static void tg3_nvram_unlock(struct tg3 *tp)
3155 if (tg3_flag(tp, NVRAM)) {
3156 if (tp->nvram_lock_cnt > 0)
3157 tp->nvram_lock_cnt--;
3158 if (tp->nvram_lock_cnt == 0)
3159 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3163 /* tp->lock is held. */
3164 static void tg3_enable_nvram_access(struct tg3 *tp)
3166 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3167 u32 nvaccess = tr32(NVRAM_ACCESS);
3169 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3173 /* tp->lock is held. */
3174 static void tg3_disable_nvram_access(struct tg3 *tp)
3176 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3177 u32 nvaccess = tr32(NVRAM_ACCESS);
3179 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3183 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3184 u32 offset, u32 *val)
3189 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3192 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3193 EEPROM_ADDR_DEVID_MASK |
3195 tw32(GRC_EEPROM_ADDR,
3197 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3198 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3199 EEPROM_ADDR_ADDR_MASK) |
3200 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3202 for (i = 0; i < 1000; i++) {
3203 tmp = tr32(GRC_EEPROM_ADDR);
3205 if (tmp & EEPROM_ADDR_COMPLETE)
3209 if (!(tmp & EEPROM_ADDR_COMPLETE))
3212 tmp = tr32(GRC_EEPROM_DATA);
3215 * The data will always be opposite the native endian
3216 * format. Perform a blind byteswap to compensate.
3223 #define NVRAM_CMD_TIMEOUT 10000
3225 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3229 tw32(NVRAM_CMD, nvram_cmd);
3230 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3231 usleep_range(10, 40);
3232 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3238 if (i == NVRAM_CMD_TIMEOUT)
3244 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3246 if (tg3_flag(tp, NVRAM) &&
3247 tg3_flag(tp, NVRAM_BUFFERED) &&
3248 tg3_flag(tp, FLASH) &&
3249 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3250 (tp->nvram_jedecnum == JEDEC_ATMEL))
3252 addr = ((addr / tp->nvram_pagesize) <<
3253 ATMEL_AT45DB0X1B_PAGE_POS) +
3254 (addr % tp->nvram_pagesize);
3259 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3261 if (tg3_flag(tp, NVRAM) &&
3262 tg3_flag(tp, NVRAM_BUFFERED) &&
3263 tg3_flag(tp, FLASH) &&
3264 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3265 (tp->nvram_jedecnum == JEDEC_ATMEL))
3267 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3268 tp->nvram_pagesize) +
3269 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
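
/* Worked example (illustrative): Atmel AT45DB0x1B parts use 264-byte
 * pages, and ATMEL_AT45DB0X1B_PAGE_POS is 9. A logical offset of 264
 * is the first byte of page 1, so the physical translation above
 * yields (1 << 9) + 0 = 0x200, and the logical translation maps
 * 0x200 back to (1 * 264) + 0 = 264 again.
 */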
3274 /* NOTE: Data read in from NVRAM is byteswapped according to
3275 * the byteswapping settings for all other register accesses.
3276 * tg3 devices are BE devices, so on a BE machine, the data
3277 * returned will be exactly as it is seen in NVRAM. On a LE
3278 * machine, the 32-bit value will be byteswapped.
3280 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3284 if (!tg3_flag(tp, NVRAM))
3285 return tg3_nvram_read_using_eeprom(tp, offset, val);
3287 offset = tg3_nvram_phys_addr(tp, offset);
3289 if (offset > NVRAM_ADDR_MSK)
3292 ret = tg3_nvram_lock(tp);
3296 tg3_enable_nvram_access(tp);
3298 tw32(NVRAM_ADDR, offset);
3299 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3300 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3303 *val = tr32(NVRAM_RDDATA);
3305 tg3_disable_nvram_access(tp);
3307 tg3_nvram_unlock(tp);
3312 /* Ensures NVRAM data is in bytestream format. */
3313 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3316 int res = tg3_nvram_read(tp, offset, &v);
3318 *val = cpu_to_be32(v);
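
/* A minimal caller sketch (hypothetical helper, assuming the usual
 * 4-byte-aligned offset and length): because tg3_nvram_read_be32()
 * returns big-endian words, copying them straight into a byte buffer
 * yields the NVRAM byte stream regardless of host endianness.
 */
static int __maybe_unused tg3_nvram_read_bytes_example(struct tg3 *tp,
							u32 offset,
							u8 *buf, u32 len)
{
	u32 i;

	for (i = 0; i < len; i += 4) {
		__be32 v;
		int err = tg3_nvram_read_be32(tp, offset + i, &v);

		if (err)
			return err;
		memcpy(buf + i, &v, 4);	/* bytestream order */
	}
	return 0;
}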
3322 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3323 u32 offset, u32 len, u8 *buf)
3328 for (i = 0; i < len; i += 4) {
3334 memcpy(&data, buf + i, 4);
3337 * The SEEPROM interface expects the data to always be opposite
3338 * the native endian format. We accomplish this by reversing
3339 * all the operations that would have been performed on the
3340 * data from a call to tg3_nvram_read_be32().
3342 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3344 val = tr32(GRC_EEPROM_ADDR);
3345 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3347 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3349 tw32(GRC_EEPROM_ADDR, val |
3350 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3351 (addr & EEPROM_ADDR_ADDR_MASK) |
3355 for (j = 0; j < 1000; j++) {
3356 val = tr32(GRC_EEPROM_ADDR);
3358 if (val & EEPROM_ADDR_COMPLETE)
3362 if (!(val & EEPROM_ADDR_COMPLETE)) {
3371 /* offset and length are dword aligned */
3372 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3376 u32 pagesize = tp->nvram_pagesize;
3377 u32 pagemask = pagesize - 1;
3381 tmp = kmalloc(pagesize, GFP_KERNEL);
3387 u32 phy_addr, page_off, size;
3389 phy_addr = offset & ~pagemask;
3391 for (j = 0; j < pagesize; j += 4) {
3392 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3393 (__be32 *) (tmp + j));
3400 page_off = offset & pagemask;
3407 memcpy(tmp + page_off, buf, size);
3409 offset = offset + (pagesize - page_off);
3411 tg3_enable_nvram_access(tp);
3414 * Before we can erase the flash page, we need
3415 * to issue a special "write enable" command.
3417 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3419 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3422 /* Erase the target page */
3423 tw32(NVRAM_ADDR, phy_addr);
3425 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3426 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3428 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3431 /* Issue another write enable to start the write. */
3432 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3434 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3437 for (j = 0; j < pagesize; j += 4) {
3440 data = *((__be32 *) (tmp + j));
3442 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3444 tw32(NVRAM_ADDR, phy_addr + j);
3446 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3450 nvram_cmd |= NVRAM_CMD_FIRST;
3451 else if (j == (pagesize - 4))
3452 nvram_cmd |= NVRAM_CMD_LAST;
3454 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3462 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3463 tg3_nvram_exec_cmd(tp, nvram_cmd);
3470 /* offset and length are dword aligned */
3471 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3476 for (i = 0; i < len; i += 4, offset += 4) {
3477 u32 page_off, phy_addr, nvram_cmd;
3480 memcpy(&data, buf + i, 4);
3481 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3483 page_off = offset % tp->nvram_pagesize;
3485 phy_addr = tg3_nvram_phys_addr(tp, offset);
3487 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3489 if (page_off == 0 || i == 0)
3490 nvram_cmd |= NVRAM_CMD_FIRST;
3491 if (page_off == (tp->nvram_pagesize - 4))
3492 nvram_cmd |= NVRAM_CMD_LAST;
3495 nvram_cmd |= NVRAM_CMD_LAST;
3497 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3498 !tg3_flag(tp, FLASH) ||
3499 !tg3_flag(tp, 57765_PLUS))
3500 tw32(NVRAM_ADDR, phy_addr);
3502 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3503 !tg3_flag(tp, 5755_PLUS) &&
3504 (tp->nvram_jedecnum == JEDEC_ST) &&
3505 (nvram_cmd & NVRAM_CMD_FIRST)) {
3508 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3509 ret = tg3_nvram_exec_cmd(tp, cmd);
3513 if (!tg3_flag(tp, FLASH)) {
3514 /* We always do complete word writes to eeprom. */
3515 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3518 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
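
/* Worked example (illustrative): with a 264-byte page, a 16-byte write
 * starting at offset 256 issues four word commands. Word 0 (i == 0)
 * gets NVRAM_CMD_FIRST, the word at page_off 260 (pagesize - 4) gets
 * NVRAM_CMD_LAST, the next word starts the following page at
 * page_off 0 and is tagged NVRAM_CMD_FIRST again, and the final word
 * (i == len - 4) is tagged NVRAM_CMD_LAST.
 */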
3525 /* offset and length are dword aligned */
3526 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3530 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3531 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3532 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3536 if (!tg3_flag(tp, NVRAM)) {
3537 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3541 ret = tg3_nvram_lock(tp);
3545 tg3_enable_nvram_access(tp);
3546 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3547 tw32(NVRAM_WRITE1, 0x406);
3549 grc_mode = tr32(GRC_MODE);
3550 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3552 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3553 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3556 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3560 grc_mode = tr32(GRC_MODE);
3561 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3563 tg3_disable_nvram_access(tp);
3564 tg3_nvram_unlock(tp);
3567 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3568 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3575 #define RX_CPU_SCRATCH_BASE 0x30000
3576 #define RX_CPU_SCRATCH_SIZE 0x04000
3577 #define TX_CPU_SCRATCH_BASE 0x34000
3578 #define TX_CPU_SCRATCH_SIZE 0x04000
3580 /* tp->lock is held. */
3581 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3584 const int iters = 10000;
3586 for (i = 0; i < iters; i++) {
3587 tw32(cpu_base + CPU_STATE, 0xffffffff);
3588 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3589 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3591 if (pci_channel_offline(tp->pdev))
3595 return (i == iters) ? -EBUSY : 0;
3598 /* tp->lock is held. */
3599 static int tg3_rxcpu_pause(struct tg3 *tp)
3601 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3603 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3604 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3610 /* tp->lock is held. */
3611 static int tg3_txcpu_pause(struct tg3 *tp)
3613 return tg3_pause_cpu(tp, TX_CPU_BASE);
3616 /* tp->lock is held. */
3617 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3619 tw32(cpu_base + CPU_STATE, 0xffffffff);
3620 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3623 /* tp->lock is held. */
3624 static void tg3_rxcpu_resume(struct tg3 *tp)
3626 tg3_resume_cpu(tp, RX_CPU_BASE);
3629 /* tp->lock is held. */
3630 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3634 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3636 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3637 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3639 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3642 if (cpu_base == RX_CPU_BASE) {
3643 rc = tg3_rxcpu_pause(tp);
3646 /* There is only an Rx CPU for the 5750 derivative in the BCM4785. */
3649 if (tg3_flag(tp, IS_SSB_CORE))
3652 rc = tg3_txcpu_pause(tp);
3656 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3657 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3661 /* Clear firmware's nvram arbitration. */
3662 if (tg3_flag(tp, NVRAM))
3663 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3667 static int tg3_fw_data_len(struct tg3 *tp,
3668 const struct tg3_firmware_hdr *fw_hdr)
3672 /* Non-fragmented firmware has one firmware header followed by a
3673 * contiguous chunk of data to be written. The length field in that
3674 * header is not the length of the data to be written but the complete
3675 * length of the bss. The data length is determined based on
3676 * tp->fw->size minus headers.
3678 * Fragmented firmware has a main header followed by multiple
3679 * fragments. Each fragment is identical to non-fragmented firmware:
3680 * a firmware header followed by a contiguous chunk of data. In
3681 * the main header, the length field is unused and set to 0xffffffff.
3682 * In each fragment header the length is the entire size of that
3683 * fragment, i.e. fragment data + header length. The data length is
3684 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3686 if (tp->fw_len == 0xffffffff)
3687 fw_len = be32_to_cpu(fw_hdr->len);
3689 fw_len = tp->fw->size;
3691 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
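
/* A fragment-walking sketch (hypothetical helper, mirroring the loader
 * below): for a fragmented image the main header is skipped, then each
 * fragment header's len field (header + data) advances the cursor.
 */
static u32 __maybe_unused
tg3_count_fw_fragments_example(const struct tg3_firmware_hdr *fw_hdr,
			       u32 total_len)
{
	u32 nfrags = 0;

	total_len -= TG3_FW_HDR_LEN;	/* main header */
	fw_hdr++;			/* first fragment header */
	while (total_len > 0) {
		u32 frag_len = be32_to_cpu(fw_hdr->len);

		nfrags++;
		total_len -= frag_len;
		fw_hdr = (const struct tg3_firmware_hdr *)
			 ((const void *)fw_hdr + frag_len);
	}
	return nfrags;
}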
3694 /* tp->lock is held. */
3695 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3696 u32 cpu_scratch_base, int cpu_scratch_size,
3697 const struct tg3_firmware_hdr *fw_hdr)
3700 void (*write_op)(struct tg3 *, u32, u32);
3701 int total_len = tp->fw->size;
3703 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3705 "%s: Trying to load TX cpu firmware which is 5705\n",
3710 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3711 write_op = tg3_write_mem;
3713 write_op = tg3_write_indirect_reg32;
3715 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3716 /* It is possible that bootcode is still loading at this point.
3717 * Get the nvram lock first before halting the cpu.
3719 int lock_err = tg3_nvram_lock(tp);
3720 err = tg3_halt_cpu(tp, cpu_base);
3722 tg3_nvram_unlock(tp);
3726 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3727 write_op(tp, cpu_scratch_base + i, 0);
3728 tw32(cpu_base + CPU_STATE, 0xffffffff);
3729 tw32(cpu_base + CPU_MODE,
3730 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3732 /* Subtract additional main header for fragmented firmware and
3733 * advance to the first fragment
3735 total_len -= TG3_FW_HDR_LEN;
3740 u32 *fw_data = (u32 *)(fw_hdr + 1);
3741 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3742 write_op(tp, cpu_scratch_base +
3743 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3745 be32_to_cpu(fw_data[i]));
3747 total_len -= be32_to_cpu(fw_hdr->len);
3749 /* Advance to next fragment */
3750 fw_hdr = (struct tg3_firmware_hdr *)
3751 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3752 } while (total_len > 0);
3760 /* tp->lock is held. */
3761 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3764 const int iters = 5;
3766 tw32(cpu_base + CPU_STATE, 0xffffffff);
3767 tw32_f(cpu_base + CPU_PC, pc);
3769 for (i = 0; i < iters; i++) {
3770 if (tr32(cpu_base + CPU_PC) == pc)
3772 tw32(cpu_base + CPU_STATE, 0xffffffff);
3773 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3774 tw32_f(cpu_base + CPU_PC, pc);
3778 return (i == iters) ? -EBUSY : 0;
3781 /* tp->lock is held. */
3782 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3784 const struct tg3_firmware_hdr *fw_hdr;
3787 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3789 /* The firmware blob starts with version numbers, followed by
3790 the start address and length. We are setting the complete length:
3791 length = end_address_of_bss - start_address_of_text.
3792 The remainder is the blob to be loaded contiguously
3793 from the start address. */
3795 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3796 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3801 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3802 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3807 /* Now startup only the RX cpu. */
3808 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3809 be32_to_cpu(fw_hdr->base_addr));
3811 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3812 "should be %08x\n", __func__,
3813 tr32(RX_CPU_BASE + CPU_PC),
3814 be32_to_cpu(fw_hdr->base_addr));
3818 tg3_rxcpu_resume(tp);
3823 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3825 const int iters = 1000;
3829 /* Wait for boot code to complete initialization and enter service
3830 * loop. It is then safe to download service patches.
3832 for (i = 0; i < iters; i++) {
3833 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3840 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3844 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3846 netdev_warn(tp->dev,
3847 "Other patches exist. Not downloading EEE patch\n");
3854 /* tp->lock is held. */
3855 static void tg3_load_57766_firmware(struct tg3 *tp)
3857 struct tg3_firmware_hdr *fw_hdr;
3859 if (!tg3_flag(tp, NO_NVRAM))
3862 if (tg3_validate_rxcpu_state(tp))
3868 /* This firmware blob has a different format than older firmware
3869 * releases, as described below. The main difference is that the
3870 * data to be written is fragmented across non-contiguous locations.
3872 * At the beginning there is a firmware header identical to other
3873 * firmware, consisting of version, base addr and length. The length
3874 * here is unused and set to 0xffffffff.
3876 * This is followed by a series of firmware fragments, each of which
3877 * is individually identical to older firmware, i.e. a firmware
3878 * header followed by the data for that fragment. The version
3879 * field of each individual fragment header is unused.
3882 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3883 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3886 if (tg3_rxcpu_pause(tp))
3889 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3890 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3892 tg3_rxcpu_resume(tp);
3895 /* tp->lock is held. */
3896 static int tg3_load_tso_firmware(struct tg3 *tp)
3898 const struct tg3_firmware_hdr *fw_hdr;
3899 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3902 if (!tg3_flag(tp, FW_TSO))
3905 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3907 /* The firmware blob starts with version numbers, followed by
3908 the start address and length. We are setting the complete length:
3909 length = end_address_of_bss - start_address_of_text.
3910 The remainder is the blob to be loaded contiguously
3911 from the start address. */
3913 cpu_scratch_size = tp->fw_len;
3915 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3916 cpu_base = RX_CPU_BASE;
3917 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3919 cpu_base = TX_CPU_BASE;
3920 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3921 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3924 err = tg3_load_firmware_cpu(tp, cpu_base,
3925 cpu_scratch_base, cpu_scratch_size,
3930 /* Now startup the cpu. */
3931 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3932 be32_to_cpu(fw_hdr->base_addr));
3935 "%s fails to set CPU PC, is %08x should be %08x\n",
3936 __func__, tr32(cpu_base + CPU_PC),
3937 be32_to_cpu(fw_hdr->base_addr));
3941 tg3_resume_cpu(tp, cpu_base);
3945 /* tp->lock is held. */
3946 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3949 u32 addr_high, addr_low;
3951 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3952 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3953 (mac_addr[4] << 8) | mac_addr[5]);
3956 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3957 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3960 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3961 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3965 /* tp->lock is held. */
3966 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3971 for (i = 0; i < 4; i++) {
3972 if (i == 1 && skip_mac_1)
3974 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3977 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3978 tg3_asic_rev(tp) == ASIC_REV_5704) {
3979 for (i = 4; i < 16; i++)
3980 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3983 addr_high = (tp->dev->dev_addr[0] +
3984 tp->dev->dev_addr[1] +
3985 tp->dev->dev_addr[2] +
3986 tp->dev->dev_addr[3] +
3987 tp->dev->dev_addr[4] +
3988 tp->dev->dev_addr[5]) &
3989 TX_BACKOFF_SEED_MASK;
3990 tw32(MAC_TX_BACKOFF_SEED, addr_high);
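
/* Worked example (illustrative): for MAC address 00:10:18:aa:bb:cc the
 * per-slot registers get addr_high = 0x0010 (bytes 0-1) and
 * addr_low = 0x18aabbcc (bytes 2-5), and the backoff seed is the byte
 * sum 0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc = 0x259 masked with
 * TX_BACKOFF_SEED_MASK.
 */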
3993 static void tg3_enable_register_access(struct tg3 *tp)
3996 /* Make sure register accesses (indirect or otherwise) will function correctly. */
3999 pci_write_config_dword(tp->pdev,
4000 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4003 static int tg3_power_up(struct tg3 *tp)
4007 tg3_enable_register_access(tp);
4009 err = pci_set_power_state(tp->pdev, PCI_D0);
4011 /* Switch out of Vaux if it is a NIC */
4012 tg3_pwrsrc_switch_to_vmain(tp);
4014 netdev_err(tp->dev, "Transition to D0 failed\n");
4020 static int tg3_setup_phy(struct tg3 *, bool);
4022 static int tg3_power_down_prepare(struct tg3 *tp)
4025 bool device_should_wake, do_low_power;
4027 tg3_enable_register_access(tp);
4029 /* Restore the CLKREQ setting. */
4030 if (tg3_flag(tp, CLKREQ_BUG))
4031 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4032 PCI_EXP_LNKCTL_CLKREQ_EN);
4034 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4035 tw32(TG3PCI_MISC_HOST_CTRL,
4036 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4038 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4039 tg3_flag(tp, WOL_ENABLE);
4041 if (tg3_flag(tp, USE_PHYLIB)) {
4042 do_low_power = false;
4043 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4044 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4045 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4046 struct phy_device *phydev;
4049 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4051 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4053 tp->link_config.speed = phydev->speed;
4054 tp->link_config.duplex = phydev->duplex;
4055 tp->link_config.autoneg = phydev->autoneg;
4056 ethtool_convert_link_mode_to_legacy_u32(
4057 &tp->link_config.advertising,
4058 phydev->advertising);
4060 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4061 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4063 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4065 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4068 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4069 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4070 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4072 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4074 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4077 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4082 linkmode_copy(phydev->advertising, advertising);
4083 phy_start_aneg(phydev);
4085 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4086 if (phyid != PHY_ID_BCMAC131) {
4087 phyid &= PHY_BCM_OUI_MASK;
4088 if (phyid == PHY_BCM_OUI_1 ||
4089 phyid == PHY_BCM_OUI_2 ||
4090 phyid == PHY_BCM_OUI_3)
4091 do_low_power = true;
4095 do_low_power = true;
4097 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4098 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4100 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4101 tg3_setup_phy(tp, false);
4104 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4107 val = tr32(GRC_VCPU_EXT_CTRL);
4108 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4109 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4113 for (i = 0; i < 200; i++) {
4114 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4115 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4120 if (tg3_flag(tp, WOL_CAP))
4121 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4122 WOL_DRV_STATE_SHUTDOWN |
4126 if (device_should_wake) {
4129 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4131 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4132 tg3_phy_auxctl_write(tp,
4133 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4134 MII_TG3_AUXCTL_PCTL_WOL_EN |
4135 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4136 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4140 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4141 mac_mode = MAC_MODE_PORT_MODE_GMII;
4142 else if (tp->phy_flags &
4143 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4144 if (tp->link_config.active_speed == SPEED_1000)
4145 mac_mode = MAC_MODE_PORT_MODE_GMII;
4147 mac_mode = MAC_MODE_PORT_MODE_MII;
4149 mac_mode = MAC_MODE_PORT_MODE_MII;
4151 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4152 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4153 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4154 SPEED_100 : SPEED_10;
4155 if (tg3_5700_link_polarity(tp, speed))
4156 mac_mode |= MAC_MODE_LINK_POLARITY;
4158 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4161 mac_mode = MAC_MODE_PORT_MODE_TBI;
4164 if (!tg3_flag(tp, 5750_PLUS))
4165 tw32(MAC_LED_CTRL, tp->led_ctrl);
4167 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4168 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4169 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4170 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4172 if (tg3_flag(tp, ENABLE_APE))
4173 mac_mode |= MAC_MODE_APE_TX_EN |
4174 MAC_MODE_APE_RX_EN |
4175 MAC_MODE_TDE_ENABLE;
4177 tw32_f(MAC_MODE, mac_mode);
4180 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4184 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4185 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4186 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4189 base_val = tp->pci_clock_ctrl;
4190 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4191 CLOCK_CTRL_TXCLK_DISABLE);
4193 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4194 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4195 } else if (tg3_flag(tp, 5780_CLASS) ||
4196 tg3_flag(tp, CPMU_PRESENT) ||
4197 tg3_asic_rev(tp) == ASIC_REV_5906) {
4199 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4200 u32 newbits1, newbits2;
4202 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4203 tg3_asic_rev(tp) == ASIC_REV_5701) {
4204 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4205 CLOCK_CTRL_TXCLK_DISABLE |
4207 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4208 } else if (tg3_flag(tp, 5705_PLUS)) {
4209 newbits1 = CLOCK_CTRL_625_CORE;
4210 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4212 newbits1 = CLOCK_CTRL_ALTCLK;
4213 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4216 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4219 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4222 if (!tg3_flag(tp, 5705_PLUS)) {
4225 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4226 tg3_asic_rev(tp) == ASIC_REV_5701) {
4227 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4228 CLOCK_CTRL_TXCLK_DISABLE |
4229 CLOCK_CTRL_44MHZ_CORE);
4231 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4234 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4235 tp->pci_clock_ctrl | newbits3, 40);
4239 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4240 tg3_power_down_phy(tp, do_low_power);
4242 tg3_frob_aux_power(tp, true);
4244 /* Workaround for unstable PLL clock */
4245 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4246 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4247 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4248 u32 val = tr32(0x7d00);
4250 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4252 if (!tg3_flag(tp, ENABLE_ASF)) {
4255 err = tg3_nvram_lock(tp);
4256 tg3_halt_cpu(tp, RX_CPU_BASE);
4258 tg3_nvram_unlock(tp);
4262 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4264 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4269 static void tg3_power_down(struct tg3 *tp)
4271 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4272 pci_set_power_state(tp->pdev, PCI_D3hot);
4275 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4277 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4278 case MII_TG3_AUX_STAT_10HALF:
4280 *duplex = DUPLEX_HALF;
4283 case MII_TG3_AUX_STAT_10FULL:
4285 *duplex = DUPLEX_FULL;
4288 case MII_TG3_AUX_STAT_100HALF:
4290 *duplex = DUPLEX_HALF;
4293 case MII_TG3_AUX_STAT_100FULL:
4295 *duplex = DUPLEX_FULL;
4298 case MII_TG3_AUX_STAT_1000HALF:
4299 *speed = SPEED_1000;
4300 *duplex = DUPLEX_HALF;
4303 case MII_TG3_AUX_STAT_1000FULL:
4304 *speed = SPEED_1000;
4305 *duplex = DUPLEX_FULL;
4309 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4310 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4312 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4316 *speed = SPEED_UNKNOWN;
4317 *duplex = DUPLEX_UNKNOWN;
4322 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4327 new_adv = ADVERTISE_CSMA;
4328 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4329 new_adv |= mii_advertise_flowctrl(flowctrl);
4331 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4335 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4336 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4338 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4339 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4340 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4342 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4347 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4350 tw32(TG3_CPMU_EEE_MODE,
4351 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4353 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4358 /* Advertise 100-BaseTX EEE ability */
4359 if (advertise & ADVERTISED_100baseT_Full)
4360 val |= MDIO_AN_EEE_ADV_100TX;
4361 /* Advertise 1000-BaseT EEE ability */
4362 if (advertise & ADVERTISED_1000baseT_Full)
4363 val |= MDIO_AN_EEE_ADV_1000T;
4365 if (!tp->eee.eee_enabled) {
4367 tp->eee.advertised = 0;
4369 tp->eee.advertised = advertise &
4370 (ADVERTISED_100baseT_Full |
4371 ADVERTISED_1000baseT_Full);
4374 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4378 switch (tg3_asic_rev(tp)) {
4380 case ASIC_REV_57765:
4381 case ASIC_REV_57766:
4383 /* If we advertised any eee advertisements above... */
4385 val = MII_TG3_DSP_TAP26_ALNOKO |
4386 MII_TG3_DSP_TAP26_RMRXSTO |
4387 MII_TG3_DSP_TAP26_OPCSINPT;
4388 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4392 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4393 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4394 MII_TG3_DSP_CH34TP2_HIBW01);
4397 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4406 static void tg3_phy_copper_begin(struct tg3 *tp)
4408 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4409 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4412 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4413 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4414 adv = ADVERTISED_10baseT_Half |
4415 ADVERTISED_10baseT_Full;
4416 if (tg3_flag(tp, WOL_SPEED_100MB))
4417 adv |= ADVERTISED_100baseT_Half |
4418 ADVERTISED_100baseT_Full;
4419 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4420 if (!(tp->phy_flags &
4421 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4422 adv |= ADVERTISED_1000baseT_Half;
4423 adv |= ADVERTISED_1000baseT_Full;
4426 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4428 adv = tp->link_config.advertising;
4429 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4430 adv &= ~(ADVERTISED_1000baseT_Half |
4431 ADVERTISED_1000baseT_Full);
4433 fc = tp->link_config.flowctrl;
4436 tg3_phy_autoneg_cfg(tp, adv, fc);
4438 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4439 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4440 /* Normally during power down we want to autonegotiate
4441 * the lowest possible speed for WOL. However, to avoid
4442 * link flap, we leave it untouched.
4447 tg3_writephy(tp, MII_BMCR,
4448 BMCR_ANENABLE | BMCR_ANRESTART);
4451 u32 bmcr, orig_bmcr;
4453 tp->link_config.active_speed = tp->link_config.speed;
4454 tp->link_config.active_duplex = tp->link_config.duplex;
4456 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4457 /* With autoneg disabled, 5715 only links up when the
4458 * advertisement register has the configured speed enabled. */
4461 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4465 switch (tp->link_config.speed) {
4471 bmcr |= BMCR_SPEED100;
4475 bmcr |= BMCR_SPEED1000;
4479 if (tp->link_config.duplex == DUPLEX_FULL)
4480 bmcr |= BMCR_FULLDPLX;
4482 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4483 (bmcr != orig_bmcr)) {
4484 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4485 for (i = 0; i < 1500; i++) {
4489 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4490 tg3_readphy(tp, MII_BMSR, &tmp))
4492 if (!(tmp & BMSR_LSTATUS)) {
4497 tg3_writephy(tp, MII_BMCR, bmcr);
4503 static int tg3_phy_pull_config(struct tg3 *tp)
4508 err = tg3_readphy(tp, MII_BMCR, &val);
4512 if (!(val & BMCR_ANENABLE)) {
4513 tp->link_config.autoneg = AUTONEG_DISABLE;
4514 tp->link_config.advertising = 0;
4515 tg3_flag_clear(tp, PAUSE_AUTONEG);
4519 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4521 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4524 tp->link_config.speed = SPEED_10;
4527 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4530 tp->link_config.speed = SPEED_100;
4532 case BMCR_SPEED1000:
4533 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4534 tp->link_config.speed = SPEED_1000;
4542 if (val & BMCR_FULLDPLX)
4543 tp->link_config.duplex = DUPLEX_FULL;
4545 tp->link_config.duplex = DUPLEX_HALF;
4547 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4553 tp->link_config.autoneg = AUTONEG_ENABLE;
4554 tp->link_config.advertising = ADVERTISED_Autoneg;
4555 tg3_flag_set(tp, PAUSE_AUTONEG);
4557 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4560 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4564 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4565 tp->link_config.advertising |= adv | ADVERTISED_TP;
4567 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4569 tp->link_config.advertising |= ADVERTISED_FIBRE;
4572 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4575 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4576 err = tg3_readphy(tp, MII_CTRL1000, &val);
4580 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4582 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4586 adv = tg3_decode_flowctrl_1000X(val);
4587 tp->link_config.flowctrl = adv;
4589 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4590 adv = mii_adv_to_ethtool_adv_x(val);
4593 tp->link_config.advertising |= adv;
4600 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4604 /* Turn off tap power management. */
4605 /* Set Extended packet length bit */
4606 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4608 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4609 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4610 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4611 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4612 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4619 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4621 struct ethtool_eee eee;
4623 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4626 tg3_eee_pull_config(tp, &eee);
4628 if (tp->eee.eee_enabled) {
4629 if (tp->eee.advertised != eee.advertised ||
4630 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4631 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4634 /* EEE is disabled but we're advertising */
4642 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4644 u32 advmsk, tgtadv, advertising;
4646 advertising = tp->link_config.advertising;
4647 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4649 advmsk = ADVERTISE_ALL;
4650 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4651 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4652 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4655 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4658 if ((*lcladv & advmsk) != tgtadv)
4661 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4664 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4666 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4670 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4671 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4672 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4673 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4674 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4676 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4679 if (tg3_ctrl != tgtadv)
4686 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4690 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4693 if (tg3_readphy(tp, MII_STAT1000, &val))
4696 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4699 if (tg3_readphy(tp, MII_LPA, rmtadv))
4702 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4703 tp->link_config.rmt_adv = lpeth;
4708 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4710 if (curr_link_up != tp->link_up) {
4712 netif_carrier_on(tp->dev);
4714 netif_carrier_off(tp->dev);
4715 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4716 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4719 tg3_link_report(tp);
4726 static void tg3_clear_mac_status(struct tg3 *tp)
4731 MAC_STATUS_SYNC_CHANGED |
4732 MAC_STATUS_CFG_CHANGED |
4733 MAC_STATUS_MI_COMPLETION |
4734 MAC_STATUS_LNKSTATE_CHANGED);
4738 static void tg3_setup_eee(struct tg3 *tp)
4742 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4743 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4744 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4745 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4747 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4749 tw32_f(TG3_CPMU_EEE_CTRL,
4750 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4752 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4753 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4754 TG3_CPMU_EEEMD_LPI_IN_RX |
4755 TG3_CPMU_EEEMD_EEE_ENABLE;
4757 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4758 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4760 if (tg3_flag(tp, ENABLE_APE))
4761 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4763 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4765 tw32_f(TG3_CPMU_EEE_DBTMR1,
4766 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4767 (tp->eee.tx_lpi_timer & 0xffff));
4769 tw32_f(TG3_CPMU_EEE_DBTMR2,
4770 TG3_CPMU_DBTMR2_APE_TX_2047US |
4771 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4774 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4776 bool current_link_up;
4778 u32 lcl_adv, rmt_adv;
4783 tg3_clear_mac_status(tp);
4785 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4787 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4791 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4793 /* Some third-party PHYs need to be reset on link going down. */
4796 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4797 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4798 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4800 tg3_readphy(tp, MII_BMSR, &bmsr);
4801 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4802 !(bmsr & BMSR_LSTATUS))
4808 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4809 tg3_readphy(tp, MII_BMSR, &bmsr);
4810 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4811 !tg3_flag(tp, INIT_COMPLETE))
4814 if (!(bmsr & BMSR_LSTATUS)) {
4815 err = tg3_init_5401phy_dsp(tp);
4819 tg3_readphy(tp, MII_BMSR, &bmsr);
4820 for (i = 0; i < 1000; i++) {
4822 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4823 (bmsr & BMSR_LSTATUS)) {
4829 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4830 TG3_PHY_REV_BCM5401_B0 &&
4831 !(bmsr & BMSR_LSTATUS) &&
4832 tp->link_config.active_speed == SPEED_1000) {
4833 err = tg3_phy_reset(tp);
4835 err = tg3_init_5401phy_dsp(tp);
4840 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4841 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4842 /* 5701 {A0,B0} CRC bug workaround */
4843 tg3_writephy(tp, 0x15, 0x0a75);
4844 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4845 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4846 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4849 /* Clear pending interrupts... */
4850 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4851 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4853 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4854 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4855 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4856 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4858 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4859 tg3_asic_rev(tp) == ASIC_REV_5701) {
4860 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4861 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4862 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4864 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4867 current_link_up = false;
4868 current_speed = SPEED_UNKNOWN;
4869 current_duplex = DUPLEX_UNKNOWN;
4870 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4871 tp->link_config.rmt_adv = 0;
4873 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4874 err = tg3_phy_auxctl_read(tp,
4875 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4877 if (!err && !(val & (1 << 10))) {
4878 tg3_phy_auxctl_write(tp,
4879 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4886 for (i = 0; i < 100; i++) {
4887 tg3_readphy(tp, MII_BMSR, &bmsr);
4888 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4889 (bmsr & BMSR_LSTATUS))
4894 if (bmsr & BMSR_LSTATUS) {
4897 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4898 for (i = 0; i < 2000; i++) {
4900 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4905 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4910 for (i = 0; i < 200; i++) {
4911 tg3_readphy(tp, MII_BMCR, &bmcr);
4912 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4914 if (bmcr && bmcr != 0x7fff)
4922 tp->link_config.active_speed = current_speed;
4923 tp->link_config.active_duplex = current_duplex;
4925 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4926 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4928 if ((bmcr & BMCR_ANENABLE) &&
4930 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4931 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4932 current_link_up = true;
4934 /* EEE settings changes take effect only after a phy
4935 * reset. If we have skipped a reset due to Link Flap
4936 * Avoidance being enabled, do it now.
4937 */
4938 if (!eee_config_ok &&
4939 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4945 if (!(bmcr & BMCR_ANENABLE) &&
4946 tp->link_config.speed == current_speed &&
4947 tp->link_config.duplex == current_duplex) {
4948 current_link_up = true;
4952 if (current_link_up &&
4953 tp->link_config.active_duplex == DUPLEX_FULL) {
4956 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4957 reg = MII_TG3_FET_GEN_STAT;
4958 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4960 reg = MII_TG3_EXT_STAT;
4961 bit = MII_TG3_EXT_STAT_MDIX;
4964 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4965 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4967 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4972 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4973 tg3_phy_copper_begin(tp);
4975 if (tg3_flag(tp, ROBOSWITCH)) {
4976 current_link_up = true;
4977 /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4978 current_speed = SPEED_1000;
4979 current_duplex = DUPLEX_FULL;
4980 tp->link_config.active_speed = current_speed;
4981 tp->link_config.active_duplex = current_duplex;
4984 tg3_readphy(tp, MII_BMSR, &bmsr);
4985 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4986 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4987 current_link_up = true;
4990 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4991 if (current_link_up) {
4992 if (tp->link_config.active_speed == SPEED_100 ||
4993 tp->link_config.active_speed == SPEED_10)
4994 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4996 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4997 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4998 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5000 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5002 /* In order for the 5750 core in BCM4785 chip to work properly
5003 * in RGMII mode, the Led Control Register must be set up.
5005 if (tg3_flag(tp, RGMII_MODE)) {
5006 u32 led_ctrl = tr32(MAC_LED_CTRL);
5007 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5009 if (tp->link_config.active_speed == SPEED_10)
5010 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5011 else if (tp->link_config.active_speed == SPEED_100)
5012 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5013 LED_CTRL_100MBPS_ON);
5014 else if (tp->link_config.active_speed == SPEED_1000)
5015 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5016 LED_CTRL_1000MBPS_ON);
5018 tw32(MAC_LED_CTRL, led_ctrl);
5022 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5023 if (tp->link_config.active_duplex == DUPLEX_HALF)
5024 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5026 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5027 if (current_link_up &&
5028 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5029 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5031 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5034 /* ??? Without this setting Netgear GA302T PHY does not
5035 * ??? send/receive packets...
5037 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5038 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5039 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5040 tw32_f(MAC_MI_MODE, tp->mi_mode);
5044 tw32_f(MAC_MODE, tp->mac_mode);
5047 tg3_phy_eee_adjust(tp, current_link_up);
5049 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5050 /* Polled via timer. */
5051 tw32_f(MAC_EVENT, 0);
5053 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5057 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5059 tp->link_config.active_speed == SPEED_1000 &&
5060 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5063 (MAC_STATUS_SYNC_CHANGED |
5064 MAC_STATUS_CFG_CHANGED));
5067 NIC_SRAM_FIRMWARE_MBOX,
5068 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5071 /* Prevent send BD corruption. */
5072 if (tg3_flag(tp, CLKREQ_BUG)) {
5073 if (tp->link_config.active_speed == SPEED_100 ||
5074 tp->link_config.active_speed == SPEED_10)
5075 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5076 PCI_EXP_LNKCTL_CLKREQ_EN);
5078 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5079 PCI_EXP_LNKCTL_CLKREQ_EN);
5082 tg3_test_and_report_link_chg(tp, current_link_up);
5087 struct tg3_fiber_aneginfo {
5089 #define ANEG_STATE_UNKNOWN 0
5090 #define ANEG_STATE_AN_ENABLE 1
5091 #define ANEG_STATE_RESTART_INIT 2
5092 #define ANEG_STATE_RESTART 3
5093 #define ANEG_STATE_DISABLE_LINK_OK 4
5094 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5095 #define ANEG_STATE_ABILITY_DETECT 6
5096 #define ANEG_STATE_ACK_DETECT_INIT 7
5097 #define ANEG_STATE_ACK_DETECT 8
5098 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5099 #define ANEG_STATE_COMPLETE_ACK 10
5100 #define ANEG_STATE_IDLE_DETECT_INIT 11
5101 #define ANEG_STATE_IDLE_DETECT 12
5102 #define ANEG_STATE_LINK_OK 13
5103 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5104 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5107 #define MR_AN_ENABLE 0x00000001
5108 #define MR_RESTART_AN 0x00000002
5109 #define MR_AN_COMPLETE 0x00000004
5110 #define MR_PAGE_RX 0x00000008
5111 #define MR_NP_LOADED 0x00000010
5112 #define MR_TOGGLE_TX 0x00000020
5113 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5114 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5115 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5116 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5117 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5118 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5119 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5120 #define MR_TOGGLE_RX 0x00002000
5121 #define MR_NP_RX 0x00004000
5123 #define MR_LINK_OK 0x80000000
5125 unsigned long link_time, cur_time;
5127 u32 ability_match_cfg;
5128 int ability_match_count;
5130 char ability_match, idle_match, ack_match;
5132 u32 txconfig, rxconfig;
5133 #define ANEG_CFG_NP 0x00000080
5134 #define ANEG_CFG_ACK 0x00000040
5135 #define ANEG_CFG_RF2 0x00000020
5136 #define ANEG_CFG_RF1 0x00000010
5137 #define ANEG_CFG_PS2 0x00000001
5138 #define ANEG_CFG_PS1 0x00008000
5139 #define ANEG_CFG_HD 0x00004000
5140 #define ANEG_CFG_FD 0x00002000
5141 #define ANEG_CFG_INVAL 0x00001f06
5146 #define ANEG_TIMER_ENAB 2
5147 #define ANEG_FAILED -1
5149 #define ANEG_STATE_SETTLE_TIME 10000
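/* Timing sketch (illustrative, assuming the caller steps the state
 * machine about once per microsecond between calls): a settle time of
 * 10000 ticks is roughly 10 ms, and the 195000-tick loop in
 * fiber_autoneg() below bounds the whole negotiation near 195 ms.
 */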
5151 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5152 struct tg3_fiber_aneginfo *ap)
5155 unsigned long delta;
5159 if (ap->state == ANEG_STATE_UNKNOWN) {
5163 ap->ability_match_cfg = 0;
5164 ap->ability_match_count = 0;
5165 ap->ability_match = 0;
5171 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5172 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5174 if (rx_cfg_reg != ap->ability_match_cfg) {
5175 ap->ability_match_cfg = rx_cfg_reg;
5176 ap->ability_match = 0;
5177 ap->ability_match_count = 0;
5179 if (++ap->ability_match_count > 1) {
5180 ap->ability_match = 1;
5181 ap->ability_match_cfg = rx_cfg_reg;
5184 if (rx_cfg_reg & ANEG_CFG_ACK)
5192 ap->ability_match_cfg = 0;
5193 ap->ability_match_count = 0;
5194 ap->ability_match = 0;
5200 ap->rxconfig = rx_cfg_reg;
5203 switch (ap->state) {
5204 case ANEG_STATE_UNKNOWN:
5205 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5206 ap->state = ANEG_STATE_AN_ENABLE;
5209 case ANEG_STATE_AN_ENABLE:
5210 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5211 if (ap->flags & MR_AN_ENABLE) {
5214 ap->ability_match_cfg = 0;
5215 ap->ability_match_count = 0;
5216 ap->ability_match = 0;
5220 ap->state = ANEG_STATE_RESTART_INIT;
5222 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5226 case ANEG_STATE_RESTART_INIT:
5227 ap->link_time = ap->cur_time;
5228 ap->flags &= ~(MR_NP_LOADED);
5230 tw32(MAC_TX_AUTO_NEG, 0);
5231 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5232 tw32_f(MAC_MODE, tp->mac_mode);
5235 ret = ANEG_TIMER_ENAB;
5236 ap->state = ANEG_STATE_RESTART;
5239 case ANEG_STATE_RESTART:
5240 delta = ap->cur_time - ap->link_time;
5241 if (delta > ANEG_STATE_SETTLE_TIME)
5242 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5244 ret = ANEG_TIMER_ENAB;
5247 case ANEG_STATE_DISABLE_LINK_OK:
5251 case ANEG_STATE_ABILITY_DETECT_INIT:
5252 ap->flags &= ~(MR_TOGGLE_TX);
5253 ap->txconfig = ANEG_CFG_FD;
5254 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5255 if (flowctrl & ADVERTISE_1000XPAUSE)
5256 ap->txconfig |= ANEG_CFG_PS1;
5257 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5258 ap->txconfig |= ANEG_CFG_PS2;
5259 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5260 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5261 tw32_f(MAC_MODE, tp->mac_mode);
5264 ap->state = ANEG_STATE_ABILITY_DETECT;
5267 case ANEG_STATE_ABILITY_DETECT:
5268 if (ap->ability_match != 0 && ap->rxconfig != 0)
5269 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5272 case ANEG_STATE_ACK_DETECT_INIT:
5273 ap->txconfig |= ANEG_CFG_ACK;
5274 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5275 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5276 tw32_f(MAC_MODE, tp->mac_mode);
5279 ap->state = ANEG_STATE_ACK_DETECT;
5282 case ANEG_STATE_ACK_DETECT:
5283 if (ap->ack_match != 0) {
5284 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5285 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5286 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5288 ap->state = ANEG_STATE_AN_ENABLE;
5290 } else if (ap->ability_match != 0 &&
5291 ap->rxconfig == 0) {
5292 ap->state = ANEG_STATE_AN_ENABLE;
5296 case ANEG_STATE_COMPLETE_ACK_INIT:
5297 if (ap->rxconfig & ANEG_CFG_INVAL) {
5301 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5302 MR_LP_ADV_HALF_DUPLEX |
5303 MR_LP_ADV_SYM_PAUSE |
5304 MR_LP_ADV_ASYM_PAUSE |
5305 MR_LP_ADV_REMOTE_FAULT1 |
5306 MR_LP_ADV_REMOTE_FAULT2 |
5307 MR_LP_ADV_NEXT_PAGE |
5310 if (ap->rxconfig & ANEG_CFG_FD)
5311 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5312 if (ap->rxconfig & ANEG_CFG_HD)
5313 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5314 if (ap->rxconfig & ANEG_CFG_PS1)
5315 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5316 if (ap->rxconfig & ANEG_CFG_PS2)
5317 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5318 if (ap->rxconfig & ANEG_CFG_RF1)
5319 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5320 if (ap->rxconfig & ANEG_CFG_RF2)
5321 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5322 if (ap->rxconfig & ANEG_CFG_NP)
5323 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5325 ap->link_time = ap->cur_time;
5327 ap->flags ^= (MR_TOGGLE_TX);
5328 if (ap->rxconfig & 0x0008)
5329 ap->flags |= MR_TOGGLE_RX;
5330 if (ap->rxconfig & ANEG_CFG_NP)
5331 ap->flags |= MR_NP_RX;
5332 ap->flags |= MR_PAGE_RX;
5334 ap->state = ANEG_STATE_COMPLETE_ACK;
5335 ret = ANEG_TIMER_ENAB;
5338 case ANEG_STATE_COMPLETE_ACK:
5339 if (ap->ability_match != 0 &&
5340 ap->rxconfig == 0) {
5341 ap->state = ANEG_STATE_AN_ENABLE;
5344 delta = ap->cur_time - ap->link_time;
5345 if (delta > ANEG_STATE_SETTLE_TIME) {
5346 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5347 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5349 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5350 !(ap->flags & MR_NP_RX)) {
5351 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5359 case ANEG_STATE_IDLE_DETECT_INIT:
5360 ap->link_time = ap->cur_time;
5361 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5362 tw32_f(MAC_MODE, tp->mac_mode);
5365 ap->state = ANEG_STATE_IDLE_DETECT;
5366 ret = ANEG_TIMER_ENAB;
5369 case ANEG_STATE_IDLE_DETECT:
5370 if (ap->ability_match != 0 &&
5371 ap->rxconfig == 0) {
5372 ap->state = ANEG_STATE_AN_ENABLE;
5375 delta = ap->cur_time - ap->link_time;
5376 if (delta > ANEG_STATE_SETTLE_TIME) {
5377 /* XXX another gem from the Broadcom driver :( */
5378 ap->state = ANEG_STATE_LINK_OK;
5382 case ANEG_STATE_LINK_OK:
5383 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5387 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5388 /* ??? unimplemented */
5391 case ANEG_STATE_NEXT_PAGE_WAIT:
5392 /* ??? unimplemented */
5403 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5406 struct tg3_fiber_aneginfo aninfo;
5407 int status = ANEG_FAILED;
5411 tw32_f(MAC_TX_AUTO_NEG, 0);
5413 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5414 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5417 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5420 memset(&aninfo, 0, sizeof(aninfo));
5421 aninfo.flags |= MR_AN_ENABLE;
5422 aninfo.state = ANEG_STATE_UNKNOWN;
5423 aninfo.cur_time = 0;
5425 while (++tick < 195000) {
5426 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5427 if (status == ANEG_DONE || status == ANEG_FAILED)
5433 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5434 tw32_f(MAC_MODE, tp->mac_mode);
5437 *txflags = aninfo.txconfig;
5438 *rxflags = aninfo.flags;
5440 if (status == ANEG_DONE &&
5441 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5442 MR_LP_ADV_FULL_DUPLEX)))
5448 static void tg3_init_bcm8002(struct tg3 *tp)
5450 u32 mac_status = tr32(MAC_STATUS);
5453 /* Reset when initializing for the first time or when we have a link. */
5454 if (tg3_flag(tp, INIT_COMPLETE) &&
5455 !(mac_status & MAC_STATUS_PCS_SYNCED))
5458 /* Set PLL lock range. */
5459 tg3_writephy(tp, 0x16, 0x8007);
5462 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5464 /* Wait for reset to complete. */
5465 /* XXX schedule_timeout() ... */
5466 for (i = 0; i < 500; i++)
5469 /* Config mode; select PMA/Ch 1 regs. */
5470 tg3_writephy(tp, 0x10, 0x8411);
5472 /* Enable auto-lock and comdet, select txclk for tx. */
5473 tg3_writephy(tp, 0x11, 0x0a10);
5475 tg3_writephy(tp, 0x18, 0x00a0);
5476 tg3_writephy(tp, 0x16, 0x41ff);
5478 /* Assert and deassert POR. */
5479 tg3_writephy(tp, 0x13, 0x0400);
5481 tg3_writephy(tp, 0x13, 0x0000);
5483 tg3_writephy(tp, 0x11, 0x0a50);
5485 tg3_writephy(tp, 0x11, 0x0a10);
5487 /* Wait for signal to stabilize */
5488 /* XXX schedule_timeout() ... */
5489 for (i = 0; i < 15000; i++)
5492 /* Deselect the channel register so we can read the PHYID
5493 * later.
5494 */
5495 tg3_writephy(tp, 0x10, 0x8011);
5498 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5501 bool current_link_up;
5502 u32 sg_dig_ctrl, sg_dig_status;
5503 u32 serdes_cfg, expected_sg_dig_ctrl;
5504 int workaround, port_a;
5509 current_link_up = false;
5511 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5512 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5514 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5517 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5518 /* preserve bits 20-23 for voltage regulator */
5519 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5522 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5524 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5525 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5527 u32 val = serdes_cfg;
5533 tw32_f(MAC_SERDES_CFG, val);
5536 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5538 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5539 tg3_setup_flow_control(tp, 0, 0);
5540 current_link_up = true;
5545 /* Want auto-negotiation. */
5546 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5548 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5549 if (flowctrl & ADVERTISE_1000XPAUSE)
5550 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5551 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5552 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5554 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5555 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5556 tp->serdes_counter &&
5557 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5558 MAC_STATUS_RCVD_CFG)) ==
5559 MAC_STATUS_PCS_SYNCED)) {
5560 tp->serdes_counter--;
5561 current_link_up = true;
5566 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5567 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5569 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5571 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5572 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5573 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5574 MAC_STATUS_SIGNAL_DET)) {
5575 sg_dig_status = tr32(SG_DIG_STATUS);
5576 mac_status = tr32(MAC_STATUS);
5578 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5579 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5580 u32 local_adv = 0, remote_adv = 0;
5582 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5583 local_adv |= ADVERTISE_1000XPAUSE;
5584 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5585 local_adv |= ADVERTISE_1000XPSE_ASYM;
5587 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5588 remote_adv |= LPA_1000XPAUSE;
5589 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5590 remote_adv |= LPA_1000XPAUSE_ASYM;
5592 tp->link_config.rmt_adv =
5593 mii_adv_to_ethtool_adv_x(remote_adv);
5595 tg3_setup_flow_control(tp, local_adv, remote_adv);
5596 current_link_up = true;
5597 tp->serdes_counter = 0;
5598 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5599 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5600 if (tp->serdes_counter)
5601 tp->serdes_counter--;
5604 u32 val = serdes_cfg;
5611 tw32_f(MAC_SERDES_CFG, val);
5614 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5617 /* Link parallel detection: the link is up only if we have
5618 * PCS_SYNC and are not receiving config code words.
5619 */
5620 mac_status = tr32(MAC_STATUS);
5621 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5622 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5623 tg3_setup_flow_control(tp, 0, 0);
5624 current_link_up = true;
5626 TG3_PHYFLG_PARALLEL_DETECT;
5627 tp->serdes_counter =
5628 SERDES_PARALLEL_DET_TIMEOUT;
5630 goto restart_autoneg;
5634 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5635 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5639 return current_link_up;
5642 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5644 bool current_link_up = false;
5646 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5649 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5650 u32 txflags, rxflags;
5653 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5654 u32 local_adv = 0, remote_adv = 0;
5656 if (txflags & ANEG_CFG_PS1)
5657 local_adv |= ADVERTISE_1000XPAUSE;
5658 if (txflags & ANEG_CFG_PS2)
5659 local_adv |= ADVERTISE_1000XPSE_ASYM;
5661 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5662 remote_adv |= LPA_1000XPAUSE;
5663 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5664 remote_adv |= LPA_1000XPAUSE_ASYM;
5666 tp->link_config.rmt_adv =
5667 mii_adv_to_ethtool_adv_x(remote_adv);
5669 tg3_setup_flow_control(tp, local_adv, remote_adv);
5671 current_link_up = true;
5673 for (i = 0; i < 30; i++) {
5676 (MAC_STATUS_SYNC_CHANGED |
5677 MAC_STATUS_CFG_CHANGED));
5679 if ((tr32(MAC_STATUS) &
5680 (MAC_STATUS_SYNC_CHANGED |
5681 MAC_STATUS_CFG_CHANGED)) == 0)
5685 mac_status = tr32(MAC_STATUS);
5686 if (!current_link_up &&
5687 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5688 !(mac_status & MAC_STATUS_RCVD_CFG))
5689 current_link_up = true;
5691 tg3_setup_flow_control(tp, 0, 0);
5693 /* Forcing 1000FD link up. */
5694 current_link_up = true;
5696 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5699 tw32_f(MAC_MODE, tp->mac_mode);
5704 return current_link_up;
5707 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5710 u32 orig_active_speed;
5711 u8 orig_active_duplex;
5713 bool current_link_up;
5716 orig_pause_cfg = tp->link_config.active_flowctrl;
5717 orig_active_speed = tp->link_config.active_speed;
5718 orig_active_duplex = tp->link_config.active_duplex;
5720 if (!tg3_flag(tp, HW_AUTONEG) &&
5722 tg3_flag(tp, INIT_COMPLETE)) {
5723 mac_status = tr32(MAC_STATUS);
5724 mac_status &= (MAC_STATUS_PCS_SYNCED |
5725 MAC_STATUS_SIGNAL_DET |
5726 MAC_STATUS_CFG_CHANGED |
5727 MAC_STATUS_RCVD_CFG);
5728 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5729 MAC_STATUS_SIGNAL_DET)) {
5730 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5731 MAC_STATUS_CFG_CHANGED));
5736 tw32_f(MAC_TX_AUTO_NEG, 0);
5738 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5739 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5740 tw32_f(MAC_MODE, tp->mac_mode);
5743 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5744 tg3_init_bcm8002(tp);
5746 /* Enable link change event even when serdes polling. */
5747 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5750 tp->link_config.rmt_adv = 0;
5751 mac_status = tr32(MAC_STATUS);
5753 if (tg3_flag(tp, HW_AUTONEG))
5754 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5756 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5758 tp->napi[0].hw_status->status =
5759 (SD_STATUS_UPDATED |
5760 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5762 for (i = 0; i < 100; i++) {
5763 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5764 MAC_STATUS_CFG_CHANGED));
5766 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5767 MAC_STATUS_CFG_CHANGED |
5768 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5772 mac_status = tr32(MAC_STATUS);
5773 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5774 current_link_up = false;
5775 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5776 tp->serdes_counter == 0) {
5777 tw32_f(MAC_MODE, (tp->mac_mode |
5778 MAC_MODE_SEND_CONFIGS));
5780 tw32_f(MAC_MODE, tp->mac_mode);
5784 if (current_link_up) {
5785 tp->link_config.active_speed = SPEED_1000;
5786 tp->link_config.active_duplex = DUPLEX_FULL;
5787 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5788 LED_CTRL_LNKLED_OVERRIDE |
5789 LED_CTRL_1000MBPS_ON));
5791 tp->link_config.active_speed = SPEED_UNKNOWN;
5792 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5793 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5794 LED_CTRL_LNKLED_OVERRIDE |
5795 LED_CTRL_TRAFFIC_OVERRIDE));
5798 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5799 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5800 if (orig_pause_cfg != now_pause_cfg ||
5801 orig_active_speed != tp->link_config.active_speed ||
5802 orig_active_duplex != tp->link_config.active_duplex)
5803 tg3_link_report(tp);
5809 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5813 u32 current_speed = SPEED_UNKNOWN;
5814 u8 current_duplex = DUPLEX_UNKNOWN;
5815 bool current_link_up = false;
5816 u32 local_adv, remote_adv, sgsr;
5818 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5819 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5820 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5821 (sgsr & SERDES_TG3_SGMII_MODE)) {
5826 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5828 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5829 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5831 current_link_up = true;
5832 if (sgsr & SERDES_TG3_SPEED_1000) {
5833 current_speed = SPEED_1000;
5834 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5835 } else if (sgsr & SERDES_TG3_SPEED_100) {
5836 current_speed = SPEED_100;
5837 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5839 current_speed = SPEED_10;
5840 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5843 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5844 current_duplex = DUPLEX_FULL;
5846 current_duplex = DUPLEX_HALF;
5849 tw32_f(MAC_MODE, tp->mac_mode);
5852 tg3_clear_mac_status(tp);
5854 goto fiber_setup_done;
5857 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5858 tw32_f(MAC_MODE, tp->mac_mode);
5861 tg3_clear_mac_status(tp);
5866 tp->link_config.rmt_adv = 0;
5868 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5869 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5870 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5871 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5872 bmsr |= BMSR_LSTATUS;
5874 bmsr &= ~BMSR_LSTATUS;
5877 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5879 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5880 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5881 /* do nothing, just check for link up at the end */
5882 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5885 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5886 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5887 ADVERTISE_1000XPAUSE |
5888 ADVERTISE_1000XPSE_ASYM |
5891 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5892 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5894 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5895 tg3_writephy(tp, MII_ADVERTISE, newadv);
5896 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5897 tg3_writephy(tp, MII_BMCR, bmcr);
5899 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5900 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5901 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5908 bmcr &= ~BMCR_SPEED1000;
5909 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5911 if (tp->link_config.duplex == DUPLEX_FULL)
5912 new_bmcr |= BMCR_FULLDPLX;
5914 if (new_bmcr != bmcr) {
5915 /* BMCR_SPEED1000 is a reserved bit that needs
5916 * to be set on write.
5918 new_bmcr |= BMCR_SPEED1000;
5920 /* Force a linkdown */
5924 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5925 adv &= ~(ADVERTISE_1000XFULL |
5926 ADVERTISE_1000XHALF |
5928 tg3_writephy(tp, MII_ADVERTISE, adv);
5929 tg3_writephy(tp, MII_BMCR, bmcr |
5933 tg3_carrier_off(tp);
5935 tg3_writephy(tp, MII_BMCR, new_bmcr);
5937 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5938 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5939 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5940 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5941 bmsr |= BMSR_LSTATUS;
5943 bmsr &= ~BMSR_LSTATUS;
5945 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5949 if (bmsr & BMSR_LSTATUS) {
5950 current_speed = SPEED_1000;
5951 current_link_up = true;
5952 if (bmcr & BMCR_FULLDPLX)
5953 current_duplex = DUPLEX_FULL;
5955 current_duplex = DUPLEX_HALF;
5960 if (bmcr & BMCR_ANENABLE) {
5963 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5964 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5965 common = local_adv & remote_adv;
5966 if (common & (ADVERTISE_1000XHALF |
5967 ADVERTISE_1000XFULL)) {
5968 if (common & ADVERTISE_1000XFULL)
5969 current_duplex = DUPLEX_FULL;
5971 current_duplex = DUPLEX_HALF;
5973 tp->link_config.rmt_adv =
5974 mii_adv_to_ethtool_adv_x(remote_adv);
5975 } else if (!tg3_flag(tp, 5780_CLASS)) {
5976 /* Link is up via parallel detect */
5978 current_link_up = false;
5984 if (current_link_up && current_duplex == DUPLEX_FULL)
5985 tg3_setup_flow_control(tp, local_adv, remote_adv);
5987 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5988 if (tp->link_config.active_duplex == DUPLEX_HALF)
5989 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5991 tw32_f(MAC_MODE, tp->mac_mode);
5994 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5996 tp->link_config.active_speed = current_speed;
5997 tp->link_config.active_duplex = current_duplex;
5999 tg3_test_and_report_link_chg(tp, current_link_up);
6003 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6005 if (tp->serdes_counter) {
6006 /* Give autoneg time to complete. */
6007 tp->serdes_counter--;
6012 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6015 tg3_readphy(tp, MII_BMCR, &bmcr);
6016 if (bmcr & BMCR_ANENABLE) {
6019 /* Select shadow register 0x1f */
6020 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6021 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6023 /* Select expansion interrupt status register */
6024 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6025 MII_TG3_DSP_EXP1_INT_STAT);
6026 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6027 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6029 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6030 /* We have signal detect and not receiving
6031 * config code words, link is up by parallel
6035 bmcr &= ~BMCR_ANENABLE;
6036 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6037 tg3_writephy(tp, MII_BMCR, bmcr);
6038 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6041 } else if (tp->link_up &&
6042 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6043 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6046 /* Select expansion interrupt status register */
6047 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6048 MII_TG3_DSP_EXP1_INT_STAT);
6049 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6053 /* Config code words received, turn on autoneg. */
6054 tg3_readphy(tp, MII_BMCR, &bmcr);
6055 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6057 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6063 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6068 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6069 err = tg3_setup_fiber_phy(tp, force_reset);
6070 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6071 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6073 err = tg3_setup_copper_phy(tp, force_reset);
6075 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6078 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6079 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6081 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6086 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6087 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6088 tw32(GRC_MISC_CFG, val);
6091 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6092 (6 << TX_LENGTHS_IPG_SHIFT);
6093 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6094 tg3_asic_rev(tp) == ASIC_REV_5762)
6095 val |= tr32(MAC_TX_LENGTHS) &
6096 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6097 TX_LENGTHS_CNT_DWN_VAL_MSK);
6099 if (tp->link_config.active_speed == SPEED_1000 &&
6100 tp->link_config.active_duplex == DUPLEX_HALF)
6101 tw32(MAC_TX_LENGTHS, val |
6102 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6104 tw32(MAC_TX_LENGTHS, val |
6105 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6107 if (!tg3_flag(tp, 5705_PLUS)) {
6109 tw32(HOSTCC_STAT_COAL_TICKS,
6110 tp->coal.stats_block_coalesce_usecs);
6112 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6116 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6117 val = tr32(PCIE_PWR_MGMT_THRESH);
6119 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6122 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6123 tw32(PCIE_PWR_MGMT_THRESH, val);
6129 /* tp->lock must be held */
6130 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6134 ptp_read_system_prets(sts);
6135 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6136 ptp_read_system_postts(sts);
6137 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6142 /* tp->lock must be held */
6143 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6145 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6147 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6148 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6149 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6150 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
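/* Note on the sequence above: the reference clock is stopped before the
 * two 32-bit halves are loaded and only resumed afterwards, so the
 * hardware never observes a torn 64-bit value.
 */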
6153 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6154 static inline void tg3_full_unlock(struct tg3 *tp);
6155 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6157 struct tg3 *tp = netdev_priv(dev);
6159 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6160 SOF_TIMESTAMPING_RX_SOFTWARE |
6161 SOF_TIMESTAMPING_SOFTWARE;
6163 if (tg3_flag(tp, PTP_CAPABLE)) {
6164 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6165 SOF_TIMESTAMPING_RX_HARDWARE |
6166 SOF_TIMESTAMPING_RAW_HARDWARE;
6170 info->phc_index = ptp_clock_index(tp->ptp_clock);
6172 info->phc_index = -1;
6174 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6176 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6177 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6178 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6179 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
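/* Usage sketch (illustrative): the capabilities filled in above are
 * what "ethtool -T <dev>" reports from userspace; on PTP-capable parts
 * this includes hardware TX/RX timestamping plus the PHC index of the
 * clock registered elsewhere in the driver.
 */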
6183 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6185 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6189 /* Frequency adjustment is performed using hardware with a 24 bit
6190 * accumulator and a programmable correction value. On each clk, the
6191 * correction value gets added to the accumulator and when it
6192 * overflows, the time counter is incremented/decremented.
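/* Worked example (illustrative): diff_by_scaled_ppm(1 << 24, scaled_ppm,
 * &correction) scales the 24-bit base by scaled_ppm / (10^6 * 2^16).
 * A request of scaled_ppm = 65536 (one ppm) therefore yields a
 * correction of about 16, and 16 / 2^24 is roughly 0.95e-6 -- close to
 * the requested one-part-per-million frequency offset.
 */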
6194 neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6196 tg3_full_lock(tp, 0);
6199 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6200 TG3_EAV_REF_CLK_CORRECT_EN |
6201 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6202 ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6204 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6206 tg3_full_unlock(tp);
6211 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6213 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6215 tg3_full_lock(tp, 0);
6216 tp->ptp_adjust += delta;
6217 tg3_full_unlock(tp);
6222 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6223 struct ptp_system_timestamp *sts)
6226 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6228 tg3_full_lock(tp, 0);
6229 ns = tg3_refclk_read(tp, sts);
6230 ns += tp->ptp_adjust;
6231 tg3_full_unlock(tp);
6233 *ts = ns_to_timespec64(ns);
6238 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6239 const struct timespec64 *ts)
6242 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6244 ns = timespec64_to_ns(ts);
6246 tg3_full_lock(tp, 0);
6247 tg3_refclk_write(tp, ns);
6249 tg3_full_unlock(tp);
6254 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6255 struct ptp_clock_request *rq, int on)
6257 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6262 case PTP_CLK_REQ_PEROUT:
6263 /* Reject requests with unsupported flags */
6264 if (rq->perout.flags)
6267 if (rq->perout.index != 0)
6270 tg3_full_lock(tp, 0);
6271 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6272 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6277 nsec = rq->perout.start.sec * 1000000000ULL +
6278 rq->perout.start.nsec;
6280 if (rq->perout.period.sec || rq->perout.period.nsec) {
6281 netdev_warn(tp->dev,
6282 "Device supports only a one-shot timesync output, period must be 0\n");
6287 if (nsec & (1ULL << 63)) {
6288 netdev_warn(tp->dev,
6289 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6294 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6295 tw32(TG3_EAV_WATCHDOG0_MSB,
6296 TG3_EAV_WATCHDOG0_EN |
6297 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6299 tw32(TG3_EAV_REF_CLCK_CTL,
6300 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6302 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6303 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6307 tg3_full_unlock(tp);
6317 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6318 struct skb_shared_hwtstamps *timestamp)
6320 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6321 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6322 tp->ptp_adjust);
6325 static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock)
6327 *hwclock = tr32(TG3_TX_TSTAMP_LSB);
6328 *hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6331 static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp)
6333 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6334 struct skb_shared_hwtstamps timestamp;
6337 if (tp->ptp_txts_retrycnt > 2)
6340 tg3_read_tx_tstamp(tp, &hwclock);
6342 if (hwclock != tp->pre_tx_ts) {
6343 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6344 skb_tstamp_tx(tp->tx_tstamp_skb, &timestamp);
6347 tp->ptp_txts_retrycnt++;
6350 dev_consume_skb_any(tp->tx_tstamp_skb);
6351 tp->tx_tstamp_skb = NULL;
6352 tp->ptp_txts_retrycnt = 0;
6357 static const struct ptp_clock_info tg3_ptp_caps = {
6358 .owner = THIS_MODULE,
6359 .name = "tg3 clock",
6360 .max_adj = 250000000,
6366 .adjfine = tg3_ptp_adjfine,
6367 .adjtime = tg3_ptp_adjtime,
6368 .do_aux_work = tg3_ptp_ts_aux_work,
6369 .gettimex64 = tg3_ptp_gettimex,
6370 .settime64 = tg3_ptp_settime,
6371 .enable = tg3_ptp_enable,
6374 /* tp->lock must be held */
6375 static void tg3_ptp_init(struct tg3 *tp)
6377 if (!tg3_flag(tp, PTP_CAPABLE))
6380 /* Initialize the hardware clock to the system time. */
6381 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6383 tp->ptp_info = tg3_ptp_caps;
6386 /* tp->lock must be held */
6387 static void tg3_ptp_resume(struct tg3 *tp)
6389 if (!tg3_flag(tp, PTP_CAPABLE))
6392 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6396 static void tg3_ptp_fini(struct tg3 *tp)
6398 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6401 ptp_clock_unregister(tp->ptp_clock);
6402 tp->ptp_clock = NULL;
6404 dev_consume_skb_any(tp->tx_tstamp_skb);
6405 tp->tx_tstamp_skb = NULL;
6408 static inline int tg3_irq_sync(struct tg3 *tp)
6410 return tp->irq_sync;
6413 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6417 dst = (u32 *)((u8 *)dst + off);
6418 for (i = 0; i < len; i += sizeof(u32))
6419 *dst++ = tr32(off + i);
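/* Note on the helper above: dst is first advanced by 'off', so each
 * register value lands at its own offset within the dump buffer and the
 * resulting image mirrors the device register map -- which is what lets
 * tg3_dump_state() below print it as address/value quadruplets.
 */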
6422 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6424 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6425 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6426 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6427 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6428 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6429 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6430 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6431 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6432 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6433 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6434 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6435 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6436 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6437 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6438 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6439 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6440 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6441 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6442 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6444 if (tg3_flag(tp, SUPPORT_MSIX))
6445 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6447 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6448 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6449 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6450 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6451 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6452 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6453 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6454 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6456 if (!tg3_flag(tp, 5705_PLUS)) {
6457 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6458 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6459 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6462 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6463 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6464 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6465 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6466 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6468 if (tg3_flag(tp, NVRAM))
6469 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6472 static void tg3_dump_state(struct tg3 *tp)
6477 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6481 if (tg3_flag(tp, PCI_EXPRESS)) {
6482 /* Read up to but not including private PCI registers */
6483 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6484 regs[i / sizeof(u32)] = tr32(i);
6486 tg3_dump_legacy_regs(tp, regs);
6488 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6489 if (!regs[i + 0] && !regs[i + 1] &&
6490 !regs[i + 2] && !regs[i + 3])
6493 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6495 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6500 for (i = 0; i < tp->irq_cnt; i++) {
6501 struct tg3_napi *tnapi = &tp->napi[i];
6503 /* SW status block */
6505 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6507 tnapi->hw_status->status,
6508 tnapi->hw_status->status_tag,
6509 tnapi->hw_status->rx_jumbo_consumer,
6510 tnapi->hw_status->rx_consumer,
6511 tnapi->hw_status->rx_mini_consumer,
6512 tnapi->hw_status->idx[0].rx_producer,
6513 tnapi->hw_status->idx[0].tx_consumer);
6516 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6518 tnapi->last_tag, tnapi->last_irq_tag,
6519 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6521 tnapi->prodring.rx_std_prod_idx,
6522 tnapi->prodring.rx_std_cons_idx,
6523 tnapi->prodring.rx_jmb_prod_idx,
6524 tnapi->prodring.rx_jmb_cons_idx);
6528 /* This is called whenever we suspect that the system chipset is re-
6529 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6530 * is bogus tx completions. We try to recover by setting the
6531 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later.
6532 */
6534 static void tg3_tx_recover(struct tg3 *tp)
6536 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6537 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6539 netdev_warn(tp->dev,
6540 "The system may be re-ordering memory-mapped I/O "
6541 "cycles to the network device, attempting to recover. "
6542 "Please report the problem to the driver maintainer "
6543 "and include system chipset information.\n");
6545 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6548 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6550 /* Tell compiler to fetch tx indices from memory. */
6552 return tnapi->tx_pending -
6553 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
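/* Worked example (illustrative, assuming the usual TG3_TX_RING_SIZE of
 * 512 and a tx_pending of 511): with tx_prod = 5 and tx_cons = 510 the
 * producer has wrapped, (5 - 510) & 511 = 7 descriptors are in flight,
 * and 511 - 7 = 504 remain available. The mask makes the subtraction
 * wrap correctly.
 */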
6556 /* Tigon3 never reports partial packet sends. So we do not
6557 * need special logic to handle SKBs that have not had all
6558 * of their frags sent yet, like SunGEM does.
6560 static void tg3_tx(struct tg3_napi *tnapi)
6562 struct tg3 *tp = tnapi->tp;
6563 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6564 u32 sw_idx = tnapi->tx_cons;
6565 struct netdev_queue *txq;
6566 int index = tnapi - tp->napi;
6567 unsigned int pkts_compl = 0, bytes_compl = 0;
6569 if (tg3_flag(tp, ENABLE_TSS))
6572 txq = netdev_get_tx_queue(tp->dev, index);
6574 while (sw_idx != hw_idx) {
6575 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6576 bool complete_skb_later = false;
6577 struct sk_buff *skb = ri->skb;
6580 if (unlikely(skb == NULL)) {
6585 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6586 struct skb_shared_hwtstamps timestamp;
6589 tg3_read_tx_tstamp(tp, &hwclock);
6590 if (hwclock != tp->pre_tx_ts) {
6591 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6592 skb_tstamp_tx(skb, &timestamp);
6595 tp->tx_tstamp_skb = skb;
6596 complete_skb_later = true;
6600 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6601 skb_headlen(skb), DMA_TO_DEVICE);
6605 while (ri->fragmented) {
6606 ri->fragmented = false;
6607 sw_idx = NEXT_TX(sw_idx);
6608 ri = &tnapi->tx_buffers[sw_idx];
6611 sw_idx = NEXT_TX(sw_idx);
6613 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6614 ri = &tnapi->tx_buffers[sw_idx];
6615 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6618 dma_unmap_page(&tp->pdev->dev,
6619 dma_unmap_addr(ri, mapping),
6620 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6623 while (ri->fragmented) {
6624 ri->fragmented = false;
6625 sw_idx = NEXT_TX(sw_idx);
6626 ri = &tnapi->tx_buffers[sw_idx];
6629 sw_idx = NEXT_TX(sw_idx);
6633 bytes_compl += skb->len;
6635 if (!complete_skb_later)
6636 dev_consume_skb_any(skb);
6638 ptp_schedule_worker(tp->ptp_clock, 0);
6640 if (unlikely(tx_bug)) {
6646 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6648 tnapi->tx_cons = sw_idx;
6650 /* Need to make the tx_cons update visible to __tg3_start_xmit()
6651 * before checking for netif_queue_stopped(). Without the
6652 * memory barrier, there is a small possibility that __tg3_start_xmit()
6653 * will miss it and cause the queue to be stopped forever.
6657 if (unlikely(netif_tx_queue_stopped(txq) &&
6658 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6659 __netif_tx_lock(txq, smp_processor_id());
6660 if (netif_tx_queue_stopped(txq) &&
6661 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6662 netif_tx_wake_queue(txq);
6663 __netif_tx_unlock(txq);
6667 static void tg3_frag_free(bool is_frag, void *data)
6670 skb_free_frag(data);
6675 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6677 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6678 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6683 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6685 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6690 /* Returns size of skb allocated or < 0 on error.
6692 * We only need to fill in the address because the other members
6693 * of the RX descriptor are invariant, see tg3_init_rings.
6695 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6696 * posting buffers we only dirty the first cache line of the RX
6697 * descriptor (containing the address), whereas for the RX status
6698 * buffers the cpu only reads the last cache line of the RX descriptor
6699 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6701 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6702 u32 opaque_key, u32 dest_idx_unmasked,
6703 unsigned int *frag_size)
6705 struct tg3_rx_buffer_desc *desc;
6706 struct ring_info *map;
6709 int skb_size, data_size, dest_idx;
6711 switch (opaque_key) {
6712 case RXD_OPAQUE_RING_STD:
6713 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6714 desc = &tpr->rx_std[dest_idx];
6715 map = &tpr->rx_std_buffers[dest_idx];
6716 data_size = tp->rx_pkt_map_sz;
6719 case RXD_OPAQUE_RING_JUMBO:
6720 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6721 desc = &tpr->rx_jmb[dest_idx].std;
6722 map = &tpr->rx_jmb_buffers[dest_idx];
6723 data_size = TG3_RX_JMB_MAP_SZ;
6730 /* Do not overwrite any of the map or rp information
6731 * until we are sure we can commit to a new buffer.
6733 * Callers depend upon this behavior and assume that
6734 * we leave everything unchanged if we fail.
6736 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6737 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6738 if (skb_size <= PAGE_SIZE) {
6739 data = napi_alloc_frag(skb_size);
6740 *frag_size = skb_size;
6742 data = kmalloc(skb_size, GFP_ATOMIC);
6748 mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6749 data_size, DMA_FROM_DEVICE);
6750 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6751 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6756 dma_unmap_addr_set(map, mapping, mapping);
6758 desc->addr_hi = ((u64)mapping >> 32);
6759 desc->addr_lo = ((u64)mapping & 0xffffffff);
6764 /* We only need to move over in the address because the other
6765 * members of the RX descriptor are invariant. See notes above
6766 * tg3_alloc_rx_data for full details.
6768 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6769 struct tg3_rx_prodring_set *dpr,
6770 u32 opaque_key, int src_idx,
6771 u32 dest_idx_unmasked)
6773 struct tg3 *tp = tnapi->tp;
6774 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6775 struct ring_info *src_map, *dest_map;
6776 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6779 switch (opaque_key) {
6780 case RXD_OPAQUE_RING_STD:
6781 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6782 dest_desc = &dpr->rx_std[dest_idx];
6783 dest_map = &dpr->rx_std_buffers[dest_idx];
6784 src_desc = &spr->rx_std[src_idx];
6785 src_map = &spr->rx_std_buffers[src_idx];
6788 case RXD_OPAQUE_RING_JUMBO:
6789 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6790 dest_desc = &dpr->rx_jmb[dest_idx].std;
6791 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6792 src_desc = &spr->rx_jmb[src_idx].std;
6793 src_map = &spr->rx_jmb_buffers[src_idx];
6800 dest_map->data = src_map->data;
6801 dma_unmap_addr_set(dest_map, mapping,
6802 dma_unmap_addr(src_map, mapping));
6803 dest_desc->addr_hi = src_desc->addr_hi;
6804 dest_desc->addr_lo = src_desc->addr_lo;
6806 /* Ensure that the update to the skb happens after the physical
6807 * addresses have been transferred to the new BD location.
6811 src_map->data = NULL;
6814 /* The RX ring scheme is composed of multiple rings which post fresh
6815 * buffers to the chip, and one special ring the chip uses to report
6816 * status back to the host.
6818 * The special ring reports the status of received packets to the
6819 * host. The chip does not write into the original descriptor the
6820 * RX buffer was obtained from. The chip simply takes the original
6821 * descriptor as provided by the host, updates the status and length
6822 * field, then writes this into the next status ring entry.
6824 * Each ring the host uses to post buffers to the chip is described
6825 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6826 * it is first placed into the on-chip ram. When the packet's length
6827 * is known, it walks down the TG3_BDINFO entries to select the ring.
6828 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6829 * which is within the range of the new packet's length is chosen.
6831 * The "separate ring for rx status" scheme may sound queer, but it makes
6832 * sense from a cache coherency perspective. If only the host writes
6833 * to the buffer post rings, and only the chip writes to the rx status
6834 * rings, then cache lines never move beyond shared-modified state.
6835 * If both the host and chip were to write into the same ring, cache line
6836 * eviction could occur since both entities want it in an exclusive state.
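/* Flow sketch (illustrative, not part of the driver):
 *
 *   host:  rx_std[prod].addr_hi/lo = dma_addr;
 *          tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, ++prod);
 *   chip:  selects a TG3_BDINFO ring by packet length, DMAs the data
 *          into the posted buffer, then writes the opaque cookie,
 *          length and error/vlan status into the next return-ring entry.
 *   host:  tg3_rx() below walks the return ring and recycles or
 *          replenishes the producer rings.
 */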
6838 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6840 struct tg3 *tp = tnapi->tp;
6841 u32 work_mask, rx_std_posted = 0;
6842 u32 std_prod_idx, jmb_prod_idx;
6843 u32 sw_idx = tnapi->rx_rcb_ptr;
6846 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6848 hw_idx = *(tnapi->rx_rcb_prod_idx);
6850 * We need to order the read of hw_idx and the read of
6851 * the opaque cookie.
6856 std_prod_idx = tpr->rx_std_prod_idx;
6857 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6858 while (sw_idx != hw_idx && budget > 0) {
6859 struct ring_info *ri;
6860 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6862 struct sk_buff *skb;
6863 dma_addr_t dma_addr;
6864 u32 opaque_key, desc_idx, *post_ptr;
6868 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6869 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6870 if (opaque_key == RXD_OPAQUE_RING_STD) {
6871 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6872 dma_addr = dma_unmap_addr(ri, mapping);
6874 post_ptr = &std_prod_idx;
6876 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6877 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6878 dma_addr = dma_unmap_addr(ri, mapping);
6880 post_ptr = &jmb_prod_idx;
6882 goto next_pkt_nopost;
6884 work_mask |= opaque_key;
6886 if (desc->err_vlan & RXD_ERR_MASK) {
6888 tg3_recycle_rx(tnapi, tpr, opaque_key,
6889 desc_idx, *post_ptr);
6891 /* Other statistics kept track of by card. */
6896 prefetch(data + TG3_RX_OFFSET(tp));
6897 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6900 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6901 RXD_FLAG_PTPSTAT_PTPV1 ||
6902 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6903 RXD_FLAG_PTPSTAT_PTPV2) {
6904 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6905 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6908 if (len > TG3_RX_COPY_THRESH(tp)) {
6910 unsigned int frag_size;
6912 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6913 *post_ptr, &frag_size);
6917 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6920 /* Ensure that the update to the data happens
6921 * after the usage of the old DMA mapping.
6928 skb = build_skb(data, frag_size);
6930 skb = slab_build_skb(data);
6932 tg3_frag_free(frag_size != 0, data);
6933 goto drop_it_no_recycle;
6935 skb_reserve(skb, TG3_RX_OFFSET(tp));
6937 tg3_recycle_rx(tnapi, tpr, opaque_key,
6938 desc_idx, *post_ptr);
6940 skb = netdev_alloc_skb(tp->dev,
6941 len + TG3_RAW_IP_ALIGN);
6943 goto drop_it_no_recycle;
6945 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6946 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6949 data + TG3_RX_OFFSET(tp),
6951 dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6952 len, DMA_FROM_DEVICE);
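/* Copy-break note (illustrative): small frames (at or below
 * TG3_RX_COPY_THRESH) are copied into a fresh skb so the original DMA
 * buffer can be recycled in place via tg3_recycle_rx(); larger frames
 * donate their buffer to the stack through build_skb() and a
 * replacement buffer is allocated above instead.
 */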
6957 tg3_hwclock_to_timestamp(tp, tstamp,
6958 skb_hwtstamps(skb));
6960 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6961 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6962 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6963 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6964 skb->ip_summed = CHECKSUM_UNNECESSARY;
6966 skb_checksum_none_assert(skb);
6968 skb->protocol = eth_type_trans(skb, tp->dev);
6970 if (len > (tp->dev->mtu + ETH_HLEN) &&
6971 skb->protocol != htons(ETH_P_8021Q) &&
6972 skb->protocol != htons(ETH_P_8021AD)) {
6973 dev_kfree_skb_any(skb);
6974 goto drop_it_no_recycle;
6977 if (desc->type_flags & RXD_FLAG_VLAN &&
6978 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6979 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6980 desc->err_vlan & RXD_VLAN_MASK);
6982 napi_gro_receive(&tnapi->napi, skb);
6990 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6991 tpr->rx_std_prod_idx = std_prod_idx &
6992 tp->rx_std_ring_mask;
6993 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6994 tpr->rx_std_prod_idx);
6995 work_mask &= ~RXD_OPAQUE_RING_STD;
7000 sw_idx &= tp->rx_ret_ring_mask;
7002 /* Refresh hw_idx to see if there is new work */
7003 if (sw_idx == hw_idx) {
7004 hw_idx = *(tnapi->rx_rcb_prod_idx);
7009 /* ACK the status ring. */
7010 tnapi->rx_rcb_ptr = sw_idx;
7011 tw32_rx_mbox(tnapi->consmbox, sw_idx);
7013 /* Refill RX ring(s). */
7014 if (!tg3_flag(tp, ENABLE_RSS)) {
7015 /* Sync BD data before updating mailbox */
7018 if (work_mask & RXD_OPAQUE_RING_STD) {
7019 tpr->rx_std_prod_idx = std_prod_idx &
7020 tp->rx_std_ring_mask;
7021 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7022 tpr->rx_std_prod_idx);
7024 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
7025 tpr->rx_jmb_prod_idx = jmb_prod_idx &
7026 tp->rx_jmb_ring_mask;
7027 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7028 tpr->rx_jmb_prod_idx);
7030 } else if (work_mask) {
7031 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
7032 * updated before the producer indices can be updated.
7036 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7037 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7039 if (tnapi != &tp->napi[1]) {
7040 tp->rx_refill = true;
7041 napi_schedule(&tp->napi[1].napi);
7048 static void tg3_poll_link(struct tg3 *tp)
7050 /* handle link change and other phy events */
7051 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7052 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7054 if (sblk->status & SD_STATUS_LINK_CHG) {
7055 sblk->status = SD_STATUS_UPDATED |
7056 (sblk->status & ~SD_STATUS_LINK_CHG);
7057 spin_lock(&tp->lock);
7058 if (tg3_flag(tp, USE_PHYLIB)) {
7060 (MAC_STATUS_SYNC_CHANGED |
7061 MAC_STATUS_CFG_CHANGED |
7062 MAC_STATUS_MI_COMPLETION |
7063 MAC_STATUS_LNKSTATE_CHANGED));
7066 tg3_setup_phy(tp, false);
7067 spin_unlock(&tp->lock);
7072 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7073 struct tg3_rx_prodring_set *dpr,
7074 struct tg3_rx_prodring_set *spr)
7076 u32 si, di, cpycnt, src_prod_idx;
7080 src_prod_idx = spr->rx_std_prod_idx;
7082 /* Make sure updates to the rx_std_buffers[] entries and the
7083 * standard producer index are seen in the correct order.
7087 if (spr->rx_std_cons_idx == src_prod_idx)
7090 if (spr->rx_std_cons_idx < src_prod_idx)
7091 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7093 cpycnt = tp->rx_std_ring_mask + 1 -
7094 spr->rx_std_cons_idx;
7096 cpycnt = min(cpycnt,
7097 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
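/* Worked example (illustrative, 512-entry ring, mask 511): with
 * rx_std_cons_idx = 500 and src_prod_idx = 10 the producer has wrapped,
 * so this pass copies 512 - 500 = 12 entries and the next pass picks up
 * the remaining 10 from index 0.
 */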
7099 si = spr->rx_std_cons_idx;
7100 di = dpr->rx_std_prod_idx;
7102 for (i = di; i < di + cpycnt; i++) {
7103 if (dpr->rx_std_buffers[i].data) {
7113 /* Ensure that updates to the rx_std_buffers ring and the
7114 * shadowed hardware producer ring from tg3_recycle_skb() are
7115 * ordered correctly WRT the skb check above.
7119 memcpy(&dpr->rx_std_buffers[di],
7120 &spr->rx_std_buffers[si],
7121 cpycnt * sizeof(struct ring_info));
7123 for (i = 0; i < cpycnt; i++, di++, si++) {
7124 struct tg3_rx_buffer_desc *sbd, *dbd;
7125 sbd = &spr->rx_std[si];
7126 dbd = &dpr->rx_std[di];
7127 dbd->addr_hi = sbd->addr_hi;
7128 dbd->addr_lo = sbd->addr_lo;
7131 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7132 tp->rx_std_ring_mask;
7133 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7134 tp->rx_std_ring_mask;
7138 src_prod_idx = spr->rx_jmb_prod_idx;
7140 /* Make sure updates to the rx_jmb_buffers[] entries and
7141 * the jumbo producer index are seen in the correct order.
7145 if (spr->rx_jmb_cons_idx == src_prod_idx)
7148 if (spr->rx_jmb_cons_idx < src_prod_idx)
7149 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7151 cpycnt = tp->rx_jmb_ring_mask + 1 -
7152 spr->rx_jmb_cons_idx;
7154 cpycnt = min(cpycnt,
7155 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7157 si = spr->rx_jmb_cons_idx;
7158 di = dpr->rx_jmb_prod_idx;
7160 for (i = di; i < di + cpycnt; i++) {
7161 if (dpr->rx_jmb_buffers[i].data) {
7171 /* Ensure that updates to the rx_jmb_buffers ring and the
7172 * shadowed hardware producer ring from tg3_recycle_skb() are
7173 * ordered correctly WRT the skb check above.
7177 memcpy(&dpr->rx_jmb_buffers[di],
7178 &spr->rx_jmb_buffers[si],
7179 cpycnt * sizeof(struct ring_info));
7181 for (i = 0; i < cpycnt; i++, di++, si++) {
7182 struct tg3_rx_buffer_desc *sbd, *dbd;
7183 sbd = &spr->rx_jmb[si].std;
7184 dbd = &dpr->rx_jmb[di].std;
7185 dbd->addr_hi = sbd->addr_hi;
7186 dbd->addr_lo = sbd->addr_lo;
7189 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7190 tp->rx_jmb_ring_mask;
7191 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7192 tp->rx_jmb_ring_mask;
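/* Per-vector NAPI worker: reap tx completions, then process rx within
 * the remaining budget.  On RSS setups, vector 1 additionally gathers
 * the buffers refilled by every vector's prodring back into vector 0's
 * prodring (the one the hardware uses) and kicks the producer
 * mailboxes when the indices moved.
 */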
7198 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7200 struct tg3 *tp = tnapi->tp;
7202 /* run TX completion thread */
7203 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7205 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7209 if (!tnapi->rx_rcb_prod_idx)
7212 /* run RX thread, within the bounds set by NAPI.
7213 * All RX "locking" is done by ensuring outside
7214 * code synchronizes with tg3->napi.poll()
7216 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7217 work_done += tg3_rx(tnapi, budget - work_done);
7219 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7220 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7222 u32 std_prod_idx = dpr->rx_std_prod_idx;
7223 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7225 tp->rx_refill = false;
7226 for (i = 1; i <= tp->rxq_cnt; i++)
7227 err |= tg3_rx_prodring_xfer(tp, dpr,
7228 &tp->napi[i].prodring);
7232 if (std_prod_idx != dpr->rx_std_prod_idx)
7233 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7234 dpr->rx_std_prod_idx);
7236 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7237 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7238 dpr->rx_jmb_prod_idx);
7241 tw32_f(HOSTCC_MODE, tp->coal_now);
7247 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7249 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7250 schedule_work(&tp->reset_task);
7253 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7255 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7256 cancel_work_sync(&tp->reset_task);
7257 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7260 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7262 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7263 struct tg3 *tp = tnapi->tp;
7265 struct tg3_hw_status *sblk = tnapi->hw_status;
7268 work_done = tg3_poll_work(tnapi, work_done, budget);
7270 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7273 if (unlikely(work_done >= budget))
7276 /* tp->last_tag is used in tg3_int_reenable() below
7277 * to tell the hw how much work has been processed,
7278 * so we must read it before checking for more work.
7280 tnapi->last_tag = sblk->status_tag;
7281 tnapi->last_irq_tag = tnapi->last_tag;
7284 /* check for RX/TX work to do */
7285 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7286 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7288 /* This test here is not race free, but will reduce
7289 * the number of interrupts by looping again.
7291 if (tnapi == &tp->napi[1] && tp->rx_refill)
7294 napi_complete_done(napi, work_done);
7295 /* Reenable interrupts. */
7296 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7298 /* This test here is synchronized by napi_schedule()
7299 * and napi_complete() to close the race condition.
7301 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7302 tw32(HOSTCC_MODE, tp->coalesce_mode |
7303 HOSTCC_MODE_ENABLE |
7310 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7314 /* work_done is guaranteed to be less than budget. */
7315 napi_complete(napi);
7316 tg3_reset_task_schedule(tp);
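/* Inspect the flow attention, MSI status and DMA status registers for
 * fatal conditions; on the first hit, latch ERROR_PROCESSED so the
 * chip reset is scheduled only once until the reset path clears the
 * flag again.
 */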
7320 static void tg3_process_error(struct tg3 *tp)
7323 bool real_error = false;
7325 if (tg3_flag(tp, ERROR_PROCESSED))
7328 /* Check Flow Attention register */
7329 val = tr32(HOSTCC_FLOW_ATTN);
7330 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7331 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7335 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7336 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7340 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7341 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7350 tg3_flag_set(tp, ERROR_PROCESSED);
7351 tg3_reset_task_schedule(tp);
7354 static int tg3_poll(struct napi_struct *napi, int budget)
7356 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7357 struct tg3 *tp = tnapi->tp;
7359 struct tg3_hw_status *sblk = tnapi->hw_status;
7362 if (sblk->status & SD_STATUS_ERROR)
7363 tg3_process_error(tp);
7367 work_done = tg3_poll_work(tnapi, work_done, budget);
7369 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7372 if (unlikely(work_done >= budget))
7375 if (tg3_flag(tp, TAGGED_STATUS)) {
7376 /* tp->last_tag is used in tg3_int_reenable() below
7377 * to tell the hw how much work has been processed,
7378 * so we must read it before checking for more work.
7380 tnapi->last_tag = sblk->status_tag;
7381 tnapi->last_irq_tag = tnapi->last_tag;
7384 sblk->status &= ~SD_STATUS_UPDATED;
7386 if (likely(!tg3_has_work(tnapi))) {
7387 napi_complete_done(napi, work_done);
7388 tg3_int_reenable(tnapi);
7393 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7397 /* work_done is guaranteed to be less than budget. */
7398 napi_complete(napi);
7399 tg3_reset_task_schedule(tp);
7403 static void tg3_napi_disable(struct tg3 *tp)
7407 for (i = tp->irq_cnt - 1; i >= 0; i--)
7408 napi_disable(&tp->napi[i].napi);
7411 static void tg3_napi_enable(struct tg3 *tp)
7415 for (i = 0; i < tp->irq_cnt; i++)
7416 napi_enable(&tp->napi[i].napi);
7419 static void tg3_napi_init(struct tg3 *tp)
7423 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
7424 for (i = 1; i < tp->irq_cnt; i++)
7425 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
7428 static void tg3_napi_fini(struct tg3 *tp)
7432 for (i = 0; i < tp->irq_cnt; i++)
7433 netif_napi_del(&tp->napi[i].napi);
7436 static inline void tg3_netif_stop(struct tg3 *tp)
7438 netif_trans_update(tp->dev); /* prevent tx timeout */
7439 tg3_napi_disable(tp);
7440 netif_carrier_off(tp->dev);
7441 netif_tx_disable(tp->dev);
7444 /* tp->lock must be held */
7445 static inline void tg3_netif_start(struct tg3 *tp)
7449 /* NOTE: unconditional netif_tx_wake_all_queues is only
7450 * appropriate so long as all callers are assured to
7451 * have free tx slots (such as after tg3_init_hw)
7453 netif_tx_wake_all_queues(tp->dev);
7456 netif_carrier_on(tp->dev);
7458 tg3_napi_enable(tp);
7459 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7460 tg3_enable_ints(tp);
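/* Wait for any in-flight interrupt handlers to finish.  tp->lock is
 * dropped across the synchronize_irq() calls since they may sleep;
 * the irq_sync flag keeps the handlers from doing new work meanwhile.
 */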
7463 static void tg3_irq_quiesce(struct tg3 *tp)
7464 __releases(tp->lock)
7465 __acquires(tp->lock)
7469 BUG_ON(tp->irq_sync);
7474 spin_unlock_bh(&tp->lock);
7476 for (i = 0; i < tp->irq_cnt; i++)
7477 synchronize_irq(tp->napi[i].irq_vec);
7479 spin_lock_bh(&tp->lock);
7482 /* Fully shut down all tg3 driver activity elsewhere in the system.
7483 * If irq_sync is non-zero, the IRQ handler is quiesced (synchronized
7484 * with) as well.  Most of the time this is not necessary, except
7485 * when shutting down the device.
7487 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7489 spin_lock_bh(&tp->lock);
7491 tg3_irq_quiesce(tp);
7494 static inline void tg3_full_unlock(struct tg3 *tp)
7496 spin_unlock_bh(&tp->lock);
7499 /* One-shot MSI handler - Chip automatically disables interrupt
7500 * after sending the MSI, so the driver doesn't have to do it.
7502 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7504 struct tg3_napi *tnapi = dev_id;
7505 struct tg3 *tp = tnapi->tp;
7507 prefetch(tnapi->hw_status);
7509 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7511 if (likely(!tg3_irq_sync(tp)))
7512 napi_schedule(&tnapi->napi);
7517 /* MSI ISR - No need to check for interrupt sharing and no need to
7518 * flush status block and interrupt mailbox. PCI ordering rules
7519 * guarantee that MSI will arrive after the status block.
7521 static irqreturn_t tg3_msi(int irq, void *dev_id)
7523 struct tg3_napi *tnapi = dev_id;
7524 struct tg3 *tp = tnapi->tp;
7526 prefetch(tnapi->hw_status);
7528 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7530 * Writing any value to intr-mbox-0 clears PCI INTA# and
7531 * chip-internal interrupt pending events.
7532 * Writing non-zero to intr-mbox-0 additionally tells the
7533 * NIC to stop sending us irqs, engaging "in-intr-handler"
7534 * event coalescing.
7536 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7537 if (likely(!tg3_irq_sync(tp)))
7538 napi_schedule(&tnapi->napi);
7540 return IRQ_RETVAL(1);
7543 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7545 struct tg3_napi *tnapi = dev_id;
7546 struct tg3 *tp = tnapi->tp;
7547 struct tg3_hw_status *sblk = tnapi->hw_status;
7548 unsigned int handled = 1;
7550 /* In INTx mode, it is possible for the interrupt to arrive at
7551 * the CPU before the status block that was posted prior to it.
7552 * Reading the PCI State register will confirm whether the
7553 * interrupt is ours and will flush the status block.
7555 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7556 if (tg3_flag(tp, CHIP_RESETTING) ||
7557 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7564 * Writing any value to intr-mbox-0 clears PCI INTA# and
7565 * chip-internal interrupt pending events.
7566 * Writing non-zero to intr-mbox-0 additionally tells the
7567 * NIC to stop sending us irqs, engaging "in-intr-handler"
7568 * event coalescing.
7570 * Flush the mailbox to de-assert the IRQ immediately to prevent
7571 * spurious interrupts. The flush impacts performance but
7572 * excessive spurious interrupts can be worse in some cases.
7574 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7575 if (tg3_irq_sync(tp))
7577 sblk->status &= ~SD_STATUS_UPDATED;
7578 if (likely(tg3_has_work(tnapi))) {
7579 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7580 napi_schedule(&tnapi->napi);
7582 /* No work; a shared interrupt, perhaps?  Re-enable
7583 * interrupts and flush that PCI write.
7585 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7589 return IRQ_RETVAL(handled);
7592 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7594 struct tg3_napi *tnapi = dev_id;
7595 struct tg3 *tp = tnapi->tp;
7596 struct tg3_hw_status *sblk = tnapi->hw_status;
7597 unsigned int handled = 1;
7599 /* In INTx mode, it is possible for the interrupt to arrive at
7600 * the CPU before the status block that was posted prior to it.
7601 * Reading the PCI State register will confirm whether the
7602 * interrupt is ours and will flush the status block.
7604 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7605 if (tg3_flag(tp, CHIP_RESETTING) ||
7606 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7613 * Writing any value to intr-mbox-0 clears PCI INTA# and
7614 * chip-internal interrupt pending events.
7615 * Writing non-zero to intr-mbox-0 additionally tells the
7616 * NIC to stop sending us irqs, engaging "in-intr-handler"
7617 * event coalescing.
7619 * Flush the mailbox to de-assert the IRQ immediately to prevent
7620 * spurious interrupts. The flush impacts performance but
7621 * excessive spurious interrupts can be worse in some cases.
7623 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7626 * In a shared interrupt configuration, sometimes other devices'
7627 * interrupts will scream. We record the current status tag here
7628 * so that the above check can report that the screaming interrupts
7629 * are unhandled. Eventually they will be silenced.
7631 tnapi->last_irq_tag = sblk->status_tag;
7633 if (tg3_irq_sync(tp))
7636 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7638 napi_schedule(&tnapi->napi);
7641 return IRQ_RETVAL(handled);
7644 /* ISR for interrupt test */
7645 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7647 struct tg3_napi *tnapi = dev_id;
7648 struct tg3 *tp = tnapi->tp;
7649 struct tg3_hw_status *sblk = tnapi->hw_status;
7651 if ((sblk->status & SD_STATUS_UPDATED) ||
7652 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7653 tg3_disable_ints(tp);
7654 return IRQ_RETVAL(1);
7656 return IRQ_RETVAL(0);
7659 #ifdef CONFIG_NET_POLL_CONTROLLER
7660 static void tg3_poll_controller(struct net_device *dev)
7663 struct tg3 *tp = netdev_priv(dev);
7665 if (tg3_irq_sync(tp))
7668 for (i = 0; i < tp->irq_cnt; i++)
7669 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7673 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7675 struct tg3 *tp = netdev_priv(dev);
7677 if (netif_msg_tx_err(tp)) {
7678 netdev_err(dev, "transmit timed out, resetting\n");
7682 tg3_reset_task_schedule(tp);
7685 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7686 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7688 u32 base = (u32) mapping & 0xffffffff;
7690 return base + len + 8 < base;
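/* Illustrative: mapping = 0xfffff000, len = 0x2000 gives
 * base + len + 8 = 0x100001008, which truncates to 0x1008 in 32-bit
 * arithmetic and is therefore < base: the wrap-around flags the 4GB
 * boundary crossing.  The extra 8 bytes look like a safety margin for
 * the hardware bug being worked around.
 */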
7693 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7694 * of any 4GB boundaries: 4G, 8G, etc
7696 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7699 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7700 u32 base = (u32) mapping & 0xffffffff;
7702 return ((base + len + (mss & 0x3fff)) < base);
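/* Illustrative: base = 0xfffffa00, len = 0x400, mss = 1448 (0x5a8).
 * base + len = 0xfffffe00 does not wrap, so the plain 4GB test above
 * passes, but adding mss wraps past 2^32, so a segment starting in
 * this buffer could still straddle the boundary and is rejected.
 */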
7707 /* Test for DMA addresses > 40-bit */
7708 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7711 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7712 if (tg3_flag(tp, 40BIT_DMA_BUG))
7713 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7720 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7721 dma_addr_t mapping, u32 len, u32 flags,
7724 txbd->addr_hi = ((u64) mapping >> 32);
7725 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7726 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7727 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
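/* Descriptor packing as written above: the 64-bit DMA address is
 * split across addr_hi/addr_lo, the length shares a word with the low
 * 16 flag bits, and the MSS and VLAN tag share the vlan_tag word.
 */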
7730 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7731 dma_addr_t map, u32 len, u32 flags,
7734 struct tg3 *tp = tnapi->tp;
7737 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7740 if (tg3_4g_overflow_test(map, len))
7743 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7746 if (tg3_40bit_overflow_test(tp, map, len))
7749 if (tp->dma_limit) {
7750 u32 prvidx = *entry;
7751 u32 tmp_flag = flags & ~TXD_FLAG_END;
7752 while (len > tp->dma_limit && *budget) {
7753 u32 frag_len = tp->dma_limit;
7754 len -= tp->dma_limit;
7756 /* Avoid the 8-byte DMA problem: when the remainder after this
 * chunk would be 8 bytes or less, emit a half-sized chunk instead
 * so the tail fragment stays longer than 8 bytes. */
7758 len += tp->dma_limit / 2;
7759 frag_len = tp->dma_limit / 2;
7762 tnapi->tx_buffers[*entry].fragmented = true;
7764 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7765 frag_len, tmp_flag, mss, vlan);
7768 *entry = NEXT_TX(*entry);
7775 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7776 len, flags, mss, vlan);
7778 *entry = NEXT_TX(*entry);
7781 tnapi->tx_buffers[prvidx].fragmented = false;
7785 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7786 len, flags, mss, vlan);
7787 *entry = NEXT_TX(*entry);
7793 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7796 struct sk_buff *skb;
7797 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7802 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7803 skb_headlen(skb), DMA_TO_DEVICE);
7805 while (txb->fragmented) {
7806 txb->fragmented = false;
7807 entry = NEXT_TX(entry);
7808 txb = &tnapi->tx_buffers[entry];
7811 for (i = 0; i <= last; i++) {
7812 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7814 entry = NEXT_TX(entry);
7815 txb = &tnapi->tx_buffers[entry];
7817 dma_unmap_page(&tnapi->tp->pdev->dev,
7818 dma_unmap_addr(txb, mapping),
7819 skb_frag_size(frag), DMA_TO_DEVICE);
7821 while (txb->fragmented) {
7822 txb->fragmented = false;
7823 entry = NEXT_TX(entry);
7824 txb = &tnapi->tx_buffers[entry];
7829 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7830 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7831 struct sk_buff **pskb,
7832 u32 *entry, u32 *budget,
7833 u32 base_flags, u32 mss, u32 vlan)
7835 struct tg3 *tp = tnapi->tp;
7836 struct sk_buff *new_skb, *skb = *pskb;
7837 dma_addr_t new_addr = 0;
7840 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7841 new_skb = skb_copy(skb, GFP_ATOMIC);
7843 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7845 new_skb = skb_copy_expand(skb,
7846 skb_headroom(skb) + more_headroom,
7847 skb_tailroom(skb), GFP_ATOMIC);
7853 /* New SKB is guaranteed to be linear. */
7854 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7855 new_skb->len, DMA_TO_DEVICE);
7856 /* Make sure the mapping succeeded */
7857 if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7858 dev_kfree_skb_any(new_skb);
7861 u32 save_entry = *entry;
7863 base_flags |= TXD_FLAG_END;
7865 tnapi->tx_buffers[*entry].skb = new_skb;
7866 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7869 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7870 new_skb->len, base_flags,
7872 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7873 dev_kfree_skb_any(new_skb);
7879 dev_consume_skb_any(skb);
7884 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7886 /* Check if we will never have enough descriptors,
7887 * as gso_segs can exceed the current ring size.
7889 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
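/* The divide-by-3 mirrors the worst-case estimate used in
 * tg3_tso_bug() below (frag_cnt_est = gso_segs * 3): each emitted
 * segment may need roughly three descriptors once headers and
 * fragment splits are accounted for.
 */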
7892 static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *);
7894 /* Use GSO to work around all TSO packets that meet HW bug conditions
7895 * indicated in tg3_tx_frag_set()
7897 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7898 struct netdev_queue *txq, struct sk_buff *skb)
7900 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7901 struct sk_buff *segs, *seg, *next;
7903 /* Estimate the number of fragments in the worst case */
7904 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7905 netif_tx_stop_queue(txq);
7907 /* netif_tx_stop_queue() must be done before checking
7908 * the tx index in tg3_tx_avail() below, because in
7909 * tg3_tx(), we update tx index before checking for
7910 * netif_tx_queue_stopped().
7913 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7914 return NETDEV_TX_BUSY;
7916 netif_tx_wake_queue(txq);
7919 segs = skb_gso_segment(skb, tp->dev->features &
7920 ~(NETIF_F_TSO | NETIF_F_TSO6));
7921 if (IS_ERR(segs) || !segs)
7922 goto tg3_tso_bug_end;
7924 skb_list_walk_safe(segs, seg, next) {
7925 skb_mark_not_on_list(seg);
7926 __tg3_start_xmit(seg, tp->dev);
7930 dev_consume_skb_any(skb);
7932 return NETDEV_TX_OK;
7935 /* hard_start_xmit for all devices */
7936 static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7938 struct tg3 *tp = netdev_priv(dev);
7939 u32 len, entry, base_flags, mss, vlan = 0;
7941 int i = -1, would_hit_hwbug;
7943 struct tg3_napi *tnapi;
7944 struct netdev_queue *txq;
7946 struct iphdr *iph = NULL;
7947 struct tcphdr *tcph = NULL;
7948 __sum16 tcp_csum = 0, ip_csum = 0;
7949 __be16 ip_tot_len = 0;
7951 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7952 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7953 if (tg3_flag(tp, ENABLE_TSS))
7956 budget = tg3_tx_avail(tnapi);
7958 /* We are running in BH disabled context with netif_tx_lock
7959 * and TX reclaim runs via tp->napi.poll inside of a software
7960 * interrupt. Furthermore, IRQ processing runs lockless so we have
7961 * no IRQ context deadlocks to worry about either. Rejoice!
7963 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7964 if (!netif_tx_queue_stopped(txq)) {
7965 netif_tx_stop_queue(txq);
7967 /* This is a hard error, log it. */
7969 "BUG! Tx Ring full when queue awake!\n");
7971 return NETDEV_TX_BUSY;
7974 entry = tnapi->tx_prod;
7977 mss = skb_shinfo(skb)->gso_size;
7979 u32 tcp_opt_len, hdr_len;
7981 if (skb_cow_head(skb, 0))
7985 tcp_opt_len = tcp_optlen(skb);
7987 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
7989 /* HW/FW can not correctly segment packets that have been
7990 * vlan encapsulated.
7992 if (skb->protocol == htons(ETH_P_8021Q) ||
7993 skb->protocol == htons(ETH_P_8021AD)) {
7994 if (tg3_tso_bug_gso_check(tnapi, skb))
7995 return tg3_tso_bug(tp, tnapi, txq, skb);
7999 if (!skb_is_gso_v6(skb)) {
8000 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
8001 tg3_flag(tp, TSO_BUG)) {
8002 if (tg3_tso_bug_gso_check(tnapi, skb))
8003 return tg3_tso_bug(tp, tnapi, txq, skb);
8006 ip_csum = iph->check;
8007 ip_tot_len = iph->tot_len;
8009 iph->tot_len = htons(mss + hdr_len);
8012 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
8013 TXD_FLAG_CPU_POST_DMA);
8015 tcph = tcp_hdr(skb);
8016 tcp_csum = tcph->check;
8018 if (tg3_flag(tp, HW_TSO_1) ||
8019 tg3_flag(tp, HW_TSO_2) ||
8020 tg3_flag(tp, HW_TSO_3)) {
8022 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
8024 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
8028 if (tg3_flag(tp, HW_TSO_3)) {
8029 mss |= (hdr_len & 0xc) << 12;
8031 base_flags |= 0x00000010;
8032 base_flags |= (hdr_len & 0x3e0) << 5;
8033 } else if (tg3_flag(tp, HW_TSO_2))
8034 mss |= hdr_len << 9;
8035 else if (tg3_flag(tp, HW_TSO_1) ||
8036 tg3_asic_rev(tp) == ASIC_REV_5705) {
8037 if (tcp_opt_len || iph->ihl > 5) {
8040 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8041 mss |= (tsflags << 11);
8044 if (tcp_opt_len || iph->ihl > 5) {
8047 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8048 base_flags |= tsflags << 12;
8051 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8052 /* HW/FW can not correctly checksum packets that have been
8053 * vlan encapsulated.
8055 if (skb->protocol == htons(ETH_P_8021Q) ||
8056 skb->protocol == htons(ETH_P_8021AD)) {
8057 if (skb_checksum_help(skb))
8060 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8064 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8065 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8066 base_flags |= TXD_FLAG_JMB_PKT;
8068 if (skb_vlan_tag_present(skb)) {
8069 base_flags |= TXD_FLAG_VLAN;
8070 vlan = skb_vlan_tag_get(skb);
8073 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8074 tg3_flag(tp, TX_TSTAMP_EN)) {
8075 tg3_full_lock(tp, 0);
8076 if (!tp->pre_tx_ts) {
8077 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8078 base_flags |= TXD_FLAG_HWTSTAMP;
8079 tg3_read_tx_tstamp(tp, &tp->pre_tx_ts);
8081 tg3_full_unlock(tp);
8084 len = skb_headlen(skb);
8086 mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8088 if (dma_mapping_error(&tp->pdev->dev, mapping))
8092 tnapi->tx_buffers[entry].skb = skb;
8093 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8095 would_hit_hwbug = 0;
8097 if (tg3_flag(tp, 5701_DMA_BUG))
8098 would_hit_hwbug = 1;
8100 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8101 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8103 would_hit_hwbug = 1;
8104 } else if (skb_shinfo(skb)->nr_frags > 0) {
8107 if (!tg3_flag(tp, HW_TSO_1) &&
8108 !tg3_flag(tp, HW_TSO_2) &&
8109 !tg3_flag(tp, HW_TSO_3))
8112 /* Now loop through additional data
8113 * fragments, and queue them.
8115 last = skb_shinfo(skb)->nr_frags - 1;
8116 for (i = 0; i <= last; i++) {
8117 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8119 len = skb_frag_size(frag);
8120 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8121 len, DMA_TO_DEVICE);
8123 tnapi->tx_buffers[entry].skb = NULL;
8124 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8126 if (dma_mapping_error(&tp->pdev->dev, mapping))
8130 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8132 ((i == last) ? TXD_FLAG_END : 0),
8134 would_hit_hwbug = 1;
8140 if (would_hit_hwbug) {
8141 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8143 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8144 /* If it's a TSO packet, do GSO instead of
8145 * allocating and copying to a large linear SKB
8148 iph->check = ip_csum;
8149 iph->tot_len = ip_tot_len;
8151 tcph->check = tcp_csum;
8152 return tg3_tso_bug(tp, tnapi, txq, skb);
8155 /* If the workaround fails due to memory/mapping
8156 * failure, silently drop this packet.
8158 entry = tnapi->tx_prod;
8159 budget = tg3_tx_avail(tnapi);
8160 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8161 base_flags, mss, vlan))
8165 skb_tx_timestamp(skb);
8166 netdev_tx_sent_queue(txq, skb->len);
8168 /* Sync BD data before updating mailbox */
8171 tnapi->tx_prod = entry;
8172 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8173 netif_tx_stop_queue(txq);
8175 /* netif_tx_stop_queue() must be done before checking
8176 * the tx index in tg3_tx_avail() below, because in
8177 * tg3_tx(), we update tx index before checking for
8178 * netif_tx_queue_stopped().
8181 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8182 netif_tx_wake_queue(txq);
8185 return NETDEV_TX_OK;
8188 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8189 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8191 dev_kfree_skb_any(skb);
8194 return NETDEV_TX_OK;
8197 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8199 struct netdev_queue *txq;
8200 u16 skb_queue_mapping;
8203 skb_queue_mapping = skb_get_queue_mapping(skb);
8204 txq = netdev_get_tx_queue(dev, skb_queue_mapping);
8206 ret = __tg3_start_xmit(skb, dev);
8208 /* Notify the hardware that packets are ready by updating the TX ring
8209 * tail pointer. We respect netdev_xmit_more() thus avoiding poking
8210 * the hardware for every packet. To guarantee forward progress the TX
8211 * ring must be drained when it is full as indicated by
8212 * netif_xmit_stopped(). This needs to happen even when the current
8213 * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets
8214 * queued by previous __tg3_start_xmit() calls might get stuck in
8215 * the queue forever.
8217 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8218 struct tg3_napi *tnapi;
8221 tp = netdev_priv(dev);
8222 tnapi = &tp->napi[skb_queue_mapping];
8224 if (tg3_flag(tp, ENABLE_TSS))
8227 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
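/* Force the MAC into (or out of) internal loopback by adjusting the
 * port mode bits and MAC_MODE_PORT_INT_LPBACK; tg3_set_loopback()
 * below uses this when the NETIF_F_LOOPBACK feature is toggled.
 */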
8233 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8236 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8237 MAC_MODE_PORT_MODE_MASK);
8239 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8241 if (!tg3_flag(tp, 5705_PLUS))
8242 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8244 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8245 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8247 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8249 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8251 if (tg3_flag(tp, 5705_PLUS) ||
8252 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8253 tg3_asic_rev(tp) == ASIC_REV_5700)
8254 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8257 tw32(MAC_MODE, tp->mac_mode);
8261 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8263 u32 val, bmcr, mac_mode, ptest = 0;
8265 tg3_phy_toggle_apd(tp, false);
8266 tg3_phy_toggle_automdix(tp, false);
8268 if (extlpbk && tg3_phy_set_extloopbk(tp))
8271 bmcr = BMCR_FULLDPLX;
8276 bmcr |= BMCR_SPEED100;
8280 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8282 bmcr |= BMCR_SPEED100;
8285 bmcr |= BMCR_SPEED1000;
8290 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8291 tg3_readphy(tp, MII_CTRL1000, &val);
8292 val |= CTL1000_AS_MASTER |
8293 CTL1000_ENABLE_MASTER;
8294 tg3_writephy(tp, MII_CTRL1000, val);
8296 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8297 MII_TG3_FET_PTEST_TRIM_2;
8298 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8301 bmcr |= BMCR_LOOPBACK;
8303 tg3_writephy(tp, MII_BMCR, bmcr);
8305 /* The write needs to be flushed for the FETs */
8306 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8307 tg3_readphy(tp, MII_BMCR, &bmcr);
8311 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8312 tg3_asic_rev(tp) == ASIC_REV_5785) {
8313 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8314 MII_TG3_FET_PTEST_FRC_TX_LINK |
8315 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8317 /* The write needs to be flushed for the AC131 */
8318 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8321 /* Reset to prevent losing 1st rx packet intermittently */
8322 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8323 tg3_flag(tp, 5780_CLASS)) {
8324 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8326 tw32_f(MAC_RX_MODE, tp->rx_mode);
8329 mac_mode = tp->mac_mode &
8330 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8331 if (speed == SPEED_1000)
8332 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8334 mac_mode |= MAC_MODE_PORT_MODE_MII;
8336 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8337 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8339 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8340 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8341 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8342 mac_mode |= MAC_MODE_LINK_POLARITY;
8344 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8345 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8348 tw32(MAC_MODE, mac_mode);
8354 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8356 struct tg3 *tp = netdev_priv(dev);
8358 if (features & NETIF_F_LOOPBACK) {
8359 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8362 spin_lock_bh(&tp->lock);
8363 tg3_mac_loopback(tp, true);
8364 netif_carrier_on(tp->dev);
8365 spin_unlock_bh(&tp->lock);
8366 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8368 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8371 spin_lock_bh(&tp->lock);
8372 tg3_mac_loopback(tp, false);
8373 /* Force link status check */
8374 tg3_setup_phy(tp, true);
8375 spin_unlock_bh(&tp->lock);
8376 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
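/* A sketch of how this path is reached from userspace, assuming the
 * standard ethtool feature name for NETIF_F_LOOPBACK:
 *
 *   ethtool -K eth0 loopback on    # enable internal MAC loopback
 *   ethtool -K eth0 loopback off   # restore normal operation
 */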
8380 static netdev_features_t tg3_fix_features(struct net_device *dev,
8381 netdev_features_t features)
8383 struct tg3 *tp = netdev_priv(dev);
8385 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8386 features &= ~NETIF_F_ALL_TSO;
8391 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8393 netdev_features_t changed = dev->features ^ features;
8395 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8396 tg3_set_loopback(dev, features);
8401 static void tg3_rx_prodring_free(struct tg3 *tp,
8402 struct tg3_rx_prodring_set *tpr)
8406 if (tpr != &tp->napi[0].prodring) {
8407 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8408 i = (i + 1) & tp->rx_std_ring_mask)
8409 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8412 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8413 for (i = tpr->rx_jmb_cons_idx;
8414 i != tpr->rx_jmb_prod_idx;
8415 i = (i + 1) & tp->rx_jmb_ring_mask) {
8416 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8424 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8425 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8428 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8429 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8430 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8435 /* Initialize rx rings for packet processing.
8437 * The chip has been shut down and the driver detached from
8438 * the network stack, so no interrupts or new tx packets will
8439 * end up in the driver.  tp->{tx,}lock are held and thus
8440 * we may not sleep.
8442 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8443 struct tg3_rx_prodring_set *tpr)
8445 u32 i, rx_pkt_dma_sz;
8447 tpr->rx_std_cons_idx = 0;
8448 tpr->rx_std_prod_idx = 0;
8449 tpr->rx_jmb_cons_idx = 0;
8450 tpr->rx_jmb_prod_idx = 0;
8452 if (tpr != &tp->napi[0].prodring) {
8453 memset(&tpr->rx_std_buffers[0], 0,
8454 TG3_RX_STD_BUFF_RING_SIZE(tp));
8455 if (tpr->rx_jmb_buffers)
8456 memset(&tpr->rx_jmb_buffers[0], 0,
8457 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8461 /* Zero out all descriptors. */
8462 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8464 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8465 if (tg3_flag(tp, 5780_CLASS) &&
8466 tp->dev->mtu > ETH_DATA_LEN)
8467 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8468 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8470 /* Initialize invariants of the rings; we only set this
8471 * stuff once. This works because the card does not
8472 * write into the rx buffer posting rings.
8474 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8475 struct tg3_rx_buffer_desc *rxd;
8477 rxd = &tpr->rx_std[i];
8478 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8479 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8480 rxd->opaque = (RXD_OPAQUE_RING_STD |
8481 (i << RXD_OPAQUE_INDEX_SHIFT));
8484 /* Now allocate fresh SKBs for each rx ring. */
8485 for (i = 0; i < tp->rx_pending; i++) {
8486 unsigned int frag_size;
8488 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8490 netdev_warn(tp->dev,
8491 "Using a smaller RX standard ring. Only "
8492 "%d out of %d buffers were allocated "
8493 "successfully\n", i, tp->rx_pending);
8501 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8504 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8506 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8509 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8510 struct tg3_rx_buffer_desc *rxd;
8512 rxd = &tpr->rx_jmb[i].std;
8513 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8514 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8516 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8517 (i << RXD_OPAQUE_INDEX_SHIFT));
8520 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8521 unsigned int frag_size;
8523 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8525 netdev_warn(tp->dev,
8526 "Using a smaller RX jumbo ring. Only %d "
8527 "out of %d buffers were allocated "
8528 "successfully\n", i, tp->rx_jumbo_pending);
8531 tp->rx_jumbo_pending = i;
8540 tg3_rx_prodring_free(tp, tpr);
8544 static void tg3_rx_prodring_fini(struct tg3 *tp,
8545 struct tg3_rx_prodring_set *tpr)
8547 kfree(tpr->rx_std_buffers);
8548 tpr->rx_std_buffers = NULL;
8549 kfree(tpr->rx_jmb_buffers);
8550 tpr->rx_jmb_buffers = NULL;
8552 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8553 tpr->rx_std, tpr->rx_std_mapping);
8557 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8558 tpr->rx_jmb, tpr->rx_jmb_mapping);
8563 static int tg3_rx_prodring_init(struct tg3 *tp,
8564 struct tg3_rx_prodring_set *tpr)
8566 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8568 if (!tpr->rx_std_buffers)
8571 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8572 TG3_RX_STD_RING_BYTES(tp),
8573 &tpr->rx_std_mapping,
8578 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8579 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8581 if (!tpr->rx_jmb_buffers)
8584 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8585 TG3_RX_JMB_RING_BYTES(tp),
8586 &tpr->rx_jmb_mapping,
8595 tg3_rx_prodring_fini(tp, tpr);
8599 /* Free up pending packets in all rx/tx rings.
8601 * The chip has been shut down and the driver detached from
8602 * the network stack, so no interrupts or new tx packets will
8603 * end up in the driver. tp->{tx,}lock is not held and we are not
8604 * in an interrupt context and thus may sleep.
8606 static void tg3_free_rings(struct tg3 *tp)
8610 for (j = 0; j < tp->irq_cnt; j++) {
8611 struct tg3_napi *tnapi = &tp->napi[j];
8613 tg3_rx_prodring_free(tp, &tnapi->prodring);
8615 if (!tnapi->tx_buffers)
8618 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8619 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8624 tg3_tx_skb_unmap(tnapi, i,
8625 skb_shinfo(skb)->nr_frags - 1);
8627 dev_consume_skb_any(skb);
8629 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8633 /* Initialize tx/rx rings for packet processing.
8635 * The chip has been shut down and the driver detached from
8636 * the network stack, so no interrupts or new tx packets will
8637 * end up in the driver.  tp->{tx,}lock are held and thus
8638 * we may not sleep.
8640 static int tg3_init_rings(struct tg3 *tp)
8644 /* Free up all the SKBs. */
8647 for (i = 0; i < tp->irq_cnt; i++) {
8648 struct tg3_napi *tnapi = &tp->napi[i];
8650 tnapi->last_tag = 0;
8651 tnapi->last_irq_tag = 0;
8652 tnapi->hw_status->status = 0;
8653 tnapi->hw_status->status_tag = 0;
8654 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8659 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8661 tnapi->rx_rcb_ptr = 0;
8663 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8665 if (tnapi->prodring.rx_std &&
8666 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8675 static void tg3_mem_tx_release(struct tg3 *tp)
8679 for (i = 0; i < tp->irq_max; i++) {
8680 struct tg3_napi *tnapi = &tp->napi[i];
8682 if (tnapi->tx_ring) {
8683 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8684 tnapi->tx_ring, tnapi->tx_desc_mapping);
8685 tnapi->tx_ring = NULL;
8688 kfree(tnapi->tx_buffers);
8689 tnapi->tx_buffers = NULL;
8693 static int tg3_mem_tx_acquire(struct tg3 *tp)
8696 struct tg3_napi *tnapi = &tp->napi[0];
8698 /* If multivector TSS is enabled, vector 0 does not handle
8699 * tx interrupts. Don't allocate any resources for it.
8701 if (tg3_flag(tp, ENABLE_TSS))
8704 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8705 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8706 sizeof(struct tg3_tx_ring_info),
8708 if (!tnapi->tx_buffers)
8711 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8713 &tnapi->tx_desc_mapping,
8715 if (!tnapi->tx_ring)
8722 tg3_mem_tx_release(tp);
8726 static void tg3_mem_rx_release(struct tg3 *tp)
8730 for (i = 0; i < tp->irq_max; i++) {
8731 struct tg3_napi *tnapi = &tp->napi[i];
8733 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8738 dma_free_coherent(&tp->pdev->dev,
8739 TG3_RX_RCB_RING_BYTES(tp),
8741 tnapi->rx_rcb_mapping);
8742 tnapi->rx_rcb = NULL;
8746 static int tg3_mem_rx_acquire(struct tg3 *tp)
8748 unsigned int i, limit;
8750 limit = tp->rxq_cnt;
8752 /* If RSS is enabled, we need a (dummy) producer ring
8753 * set on vector zero. This is the true hw prodring.
8755 if (tg3_flag(tp, ENABLE_RSS))
8758 for (i = 0; i < limit; i++) {
8759 struct tg3_napi *tnapi = &tp->napi[i];
8761 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8764 /* If multivector RSS is enabled, vector 0
8765 * does not handle rx or tx interrupts.
8766 * Don't allocate any resources for it.
8768 if (!i && tg3_flag(tp, ENABLE_RSS))
8771 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8772 TG3_RX_RCB_RING_BYTES(tp),
8773 &tnapi->rx_rcb_mapping,
8782 tg3_mem_rx_release(tp);
8787 * Must not be invoked with interrupt sources disabled and
8788 * the hardware shut down.
8790 static void tg3_free_consistent(struct tg3 *tp)
8794 for (i = 0; i < tp->irq_cnt; i++) {
8795 struct tg3_napi *tnapi = &tp->napi[i];
8797 if (tnapi->hw_status) {
8798 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8800 tnapi->status_mapping);
8801 tnapi->hw_status = NULL;
8805 tg3_mem_rx_release(tp);
8806 tg3_mem_tx_release(tp);
8808 /* tp->hw_stats can be referenced safely:
8809 * 1. under rtnl_lock
8810 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8813 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8814 tp->hw_stats, tp->stats_mapping);
8815 tp->hw_stats = NULL;
8820 * Must not be invoked with interrupt sources disabled and
8821 * the hardware shut down.  Can sleep.
8823 static int tg3_alloc_consistent(struct tg3 *tp)
8827 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8828 sizeof(struct tg3_hw_stats),
8829 &tp->stats_mapping, GFP_KERNEL);
8833 for (i = 0; i < tp->irq_cnt; i++) {
8834 struct tg3_napi *tnapi = &tp->napi[i];
8835 struct tg3_hw_status *sblk;
8837 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8839 &tnapi->status_mapping,
8841 if (!tnapi->hw_status)
8844 sblk = tnapi->hw_status;
8846 if (tg3_flag(tp, ENABLE_RSS)) {
8847 u16 *prodptr = NULL;
8850 * When RSS is enabled, the status block format changes
8851 * slightly. The "rx_jumbo_consumer", "reserved",
8852 * and "rx_mini_consumer" members get mapped to the
8853 * other three rx return ring producer indexes.
8857 prodptr = &sblk->idx[0].rx_producer;
8860 prodptr = &sblk->rx_jumbo_consumer;
8863 prodptr = &sblk->reserved;
8866 prodptr = &sblk->rx_mini_consumer;
8869 tnapi->rx_rcb_prod_idx = prodptr;
8871 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8875 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8881 tg3_free_consistent(tp);
8885 #define MAX_WAIT_CNT 1000
8887 /* To stop a block, clear the enable bit and poll till it
8888 * clears. tp->lock is held.
8890 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8895 if (tg3_flag(tp, 5705_PLUS)) {
8902 /* We can't enable/disable these bits of the
8903 * 5705/5750, just say success.
8916 for (i = 0; i < MAX_WAIT_CNT; i++) {
8917 if (pci_channel_offline(tp->pdev)) {
8918 dev_err(&tp->pdev->dev,
8919 "tg3_stop_block device offline, "
8920 "ofs=%lx enable_bit=%x\n",
8927 if ((val & enable_bit) == 0)
8931 if (i == MAX_WAIT_CNT && !silent) {
8932 dev_err(&tp->pdev->dev,
8933 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8941 /* tp->lock is held. */
8942 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8946 tg3_disable_ints(tp);
8948 if (pci_channel_offline(tp->pdev)) {
8949 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8950 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8955 tp->rx_mode &= ~RX_MODE_ENABLE;
8956 tw32_f(MAC_RX_MODE, tp->rx_mode);
8959 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8960 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8961 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8962 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8963 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8964 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8966 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8967 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8968 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8969 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8970 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8971 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8972 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8974 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8975 tw32_f(MAC_MODE, tp->mac_mode);
8978 tp->tx_mode &= ~TX_MODE_ENABLE;
8979 tw32_f(MAC_TX_MODE, tp->tx_mode);
8981 for (i = 0; i < MAX_WAIT_CNT; i++) {
8983 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8986 if (i >= MAX_WAIT_CNT) {
8987 dev_err(&tp->pdev->dev,
8988 "%s timed out, TX_MODE_ENABLE will not clear "
8989 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8993 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8994 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8995 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8997 tw32(FTQ_RESET, 0xffffffff);
8998 tw32(FTQ_RESET, 0x00000000);
9000 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
9001 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
9004 for (i = 0; i < tp->irq_cnt; i++) {
9005 struct tg3_napi *tnapi = &tp->napi[i];
9006 if (tnapi->hw_status)
9007 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9013 /* Save PCI command register before chip reset */
9014 static void tg3_save_pci_state(struct tg3 *tp)
9016 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
9019 /* Restore PCI state after chip reset */
9020 static void tg3_restore_pci_state(struct tg3 *tp)
9024 /* Re-enable indirect register accesses. */
9025 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9026 tp->misc_host_ctrl);
9028 /* Set MAX PCI retry to zero. */
9029 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
9030 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9031 tg3_flag(tp, PCIX_MODE))
9032 val |= PCISTATE_RETRY_SAME_DMA;
9033 /* Allow reads and writes to the APE register and memory space. */
9034 if (tg3_flag(tp, ENABLE_APE))
9035 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9036 PCISTATE_ALLOW_APE_SHMEM_WR |
9037 PCISTATE_ALLOW_APE_PSPACE_WR;
9038 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
9040 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
9042 if (!tg3_flag(tp, PCI_EXPRESS)) {
9043 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
9044 tp->pci_cacheline_sz);
9045 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
9049 /* Make sure PCI-X relaxed ordering bit is clear. */
9050 if (tg3_flag(tp, PCIX_MODE)) {
9053 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9055 pcix_cmd &= ~PCI_X_CMD_ERO;
9056 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9060 if (tg3_flag(tp, 5780_CLASS)) {
9062 /* Chip reset on 5780 will reset MSI enable bit,
9063 * so we need to restore it.
9065 if (tg3_flag(tp, USING_MSI)) {
9068 pci_read_config_word(tp->pdev,
9069 tp->msi_cap + PCI_MSI_FLAGS,
9071 pci_write_config_word(tp->pdev,
9072 tp->msi_cap + PCI_MSI_FLAGS,
9073 ctrl | PCI_MSI_FLAGS_ENABLE);
9074 val = tr32(MSGINT_MODE);
9075 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9080 static void tg3_override_clk(struct tg3 *tp)
9084 switch (tg3_asic_rev(tp)) {
9086 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9087 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9088 TG3_CPMU_MAC_ORIDE_ENABLE);
9093 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9101 static void tg3_restore_clk(struct tg3 *tp)
9105 switch (tg3_asic_rev(tp)) {
9107 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9108 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9109 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9114 val = tr32(TG3_CPMU_CLCK_ORIDE);
9115 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9123 /* tp->lock is held. */
9124 static int tg3_chip_reset(struct tg3 *tp)
9125 __releases(tp->lock)
9126 __acquires(tp->lock)
9129 void (*write_op)(struct tg3 *, u32, u32);
9132 if (!pci_device_is_present(tp->pdev))
9137 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9139 /* No matching tg3_nvram_unlock() after this because
9140 * chip reset below will undo the nvram lock.
9142 tp->nvram_lock_cnt = 0;
9144 /* GRC_MISC_CFG core clock reset will clear the memory
9145 * enable bit in PCI register 4 and the MSI enable bit
9146 * on some chips, so we save relevant registers here.
9148 tg3_save_pci_state(tp);
9150 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9151 tg3_flag(tp, 5755_PLUS))
9152 tw32(GRC_FASTBOOT_PC, 0);
9155 * We must avoid the readl() that normally takes place.
9156 * It locks machines, causes machine checks, and other
9157 * fun things. So, temporarily disable the 5701
9158 * hardware workaround, while we do the reset.
9160 write_op = tp->write32;
9161 if (write_op == tg3_write_flush_reg32)
9162 tp->write32 = tg3_write32;
9164 /* Prevent the irq handler from reading or writing PCI registers
9165 * during chip reset when the memory enable bit in the PCI command
9166 * register may be cleared. The chip does not generate interrupt
9167 * at this time, but the irq handler may still be called due to irq
9168 * sharing or irqpoll.
9170 tg3_flag_set(tp, CHIP_RESETTING);
9171 for (i = 0; i < tp->irq_cnt; i++) {
9172 struct tg3_napi *tnapi = &tp->napi[i];
9173 if (tnapi->hw_status) {
9174 tnapi->hw_status->status = 0;
9175 tnapi->hw_status->status_tag = 0;
9177 tnapi->last_tag = 0;
9178 tnapi->last_irq_tag = 0;
9182 tg3_full_unlock(tp);
9184 for (i = 0; i < tp->irq_cnt; i++)
9185 synchronize_irq(tp->napi[i].irq_vec);
9187 tg3_full_lock(tp, 0);
9189 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9190 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9191 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9195 val = GRC_MISC_CFG_CORECLK_RESET;
9197 if (tg3_flag(tp, PCI_EXPRESS)) {
9198 /* Force PCIe 1.0a mode */
9199 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9200 !tg3_flag(tp, 57765_PLUS) &&
9201 tr32(TG3_PCIE_PHY_TSTCTL) ==
9202 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9203 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9205 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9206 tw32(GRC_MISC_CFG, (1 << 29));
9211 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9212 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9213 tw32(GRC_VCPU_EXT_CTRL,
9214 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9217 /* Set the clock to the highest frequency to avoid timeouts. With link
9218 * aware mode, the clock speed could be slow and bootcode does not
9219 * complete within the expected time. Override the clock to allow the
9220 * bootcode to finish sooner and then restore it.
9222 tg3_override_clk(tp);
9224 /* Manage gphy power for all CPMU absent PCIe devices. */
9225 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9226 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9228 tw32(GRC_MISC_CFG, val);
9230 /* restore 5701 hardware bug workaround write method */
9231 tp->write32 = write_op;
9233 /* Unfortunately, we have to delay before the PCI read back.
9234 * Some 575X chips even will not respond to a PCI cfg access
9235 * when the reset command is given to the chip.
9237 * How do these hardware designers expect things to work
9238 * properly if the PCI write is posted for a long period
9239 * of time? It is always necessary to have some method by
9240 * which a register read back can occur to push the write
9241 * out which does the reset.
9243 * For most tg3 variants the trick below was working.
9248 /* Flush PCI posted writes. The normal MMIO registers
9249 * are inaccessible at this time so this is the only
9250 * way to do this reliably (actually, this is no longer
9251 * the case, see above). I tried to use indirect
9252 * register read/write but this upset some 5701 variants.
9254 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9258 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9261 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9265 /* Wait for link training to complete. */
9266 for (j = 0; j < 5000; j++)
9269 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9270 pci_write_config_dword(tp->pdev, 0xc4,
9271 cfg_val | (1 << 15));
9274 /* Clear the "no snoop" and "relaxed ordering" bits. */
9275 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9277 * Older PCIe devices only support the 128 byte
9278 * MPS setting. Enforce the restriction.
9280 if (!tg3_flag(tp, CPMU_PRESENT))
9281 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9282 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9284 /* Clear error status */
9285 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9286 PCI_EXP_DEVSTA_CED |
9287 PCI_EXP_DEVSTA_NFED |
9288 PCI_EXP_DEVSTA_FED |
9289 PCI_EXP_DEVSTA_URD);
9292 tg3_restore_pci_state(tp);
9294 tg3_flag_clear(tp, CHIP_RESETTING);
9295 tg3_flag_clear(tp, ERROR_PROCESSED);
9298 if (tg3_flag(tp, 5780_CLASS))
9299 val = tr32(MEMARB_MODE);
9300 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9302 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9304 tw32(0x5000, 0x400);
9307 if (tg3_flag(tp, IS_SSB_CORE)) {
9309 * BCM4785: In order to avoid repercussions from using
9310 * potentially defective internal ROM, stop the Rx RISC CPU,
9311 * which is not required.
9314 tg3_halt_cpu(tp, RX_CPU_BASE);
9317 err = tg3_poll_fw(tp);
9321 tw32(GRC_MODE, tp->grc_mode);
9323 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9326 tw32(0xc4, val | (1 << 15));
9329 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9330 tg3_asic_rev(tp) == ASIC_REV_5705) {
9331 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9332 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9333 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9334 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9337 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9338 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9340 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9341 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9346 tw32_f(MAC_MODE, val);
9349 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9353 if (tg3_flag(tp, PCI_EXPRESS) &&
9354 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9355 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9356 !tg3_flag(tp, 57765_PLUS)) {
9359 tw32(0x7c00, val | (1 << 25));
9362 tg3_restore_clk(tp);
9364 /* Increase the core clock speed to fix a tx timeout issue seen on 5762
9365 * with 100Mbps link speed.
9367 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9368 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9369 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9370 TG3_CPMU_MAC_ORIDE_ENABLE);
9373 /* Reprobe ASF enable state. */
9374 tg3_flag_clear(tp, ENABLE_ASF);
9375 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9376 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9378 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9379 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9380 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9383 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9384 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9385 tg3_flag_set(tp, ENABLE_ASF);
9386 tp->last_event_jiffies = jiffies;
9387 if (tg3_flag(tp, 5750_PLUS))
9388 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9390 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9391 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9392 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9393 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9394 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9401 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9402 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9403 static void __tg3_set_rx_mode(struct net_device *);
9405 /* tp->lock is held. */
9406 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9412 tg3_write_sig_pre_reset(tp, kind);
9414 tg3_abort_hw(tp, silent);
9415 err = tg3_chip_reset(tp);
9417 __tg3_set_mac_addr(tp, false);
9419 tg3_write_sig_legacy(tp, kind);
9420 tg3_write_sig_post_reset(tp, kind);
9423 /* Save the stats across chip resets... */
9424 tg3_get_nstats(tp, &tp->net_stats_prev);
9425 tg3_get_estats(tp, &tp->estats_prev);
9427 /* And make sure the next sample is new data */
9428 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9434 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9436 struct tg3 *tp = netdev_priv(dev);
9437 struct sockaddr *addr = p;
9439 bool skip_mac_1 = false;
9441 if (!is_valid_ether_addr(addr->sa_data))
9442 return -EADDRNOTAVAIL;
9444 eth_hw_addr_set(dev, addr->sa_data);
9446 if (!netif_running(dev))
9449 if (tg3_flag(tp, ENABLE_ASF)) {
9450 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9452 addr0_high = tr32(MAC_ADDR_0_HIGH);
9453 addr0_low = tr32(MAC_ADDR_0_LOW);
9454 addr1_high = tr32(MAC_ADDR_1_HIGH);
9455 addr1_low = tr32(MAC_ADDR_1_LOW);
9457 /* Skip MAC addr 1 if ASF is using it. */
9458 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9459 !(addr1_high == 0 && addr1_low == 0))
9462 spin_lock_bh(&tp->lock);
9463 __tg3_set_mac_addr(tp, skip_mac_1);
9464 __tg3_set_rx_mode(dev);
9465 spin_unlock_bh(&tp->lock);
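/* Each ring's control block lives in NIC SRAM as a BDINFO structure:
 * a 64-bit host address, a maxlen/flags word and (on pre-5705 parts)
 * a NIC-side address.  tg3_set_bdinfo() below programs one such block
 * through indirect memory writes.
 */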
9470 /* tp->lock is held. */
9471 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9472 dma_addr_t mapping, u32 maxlen_flags,
9476 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9477 ((u64) mapping >> 32));
9479 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9480 ((u64) mapping & 0xffffffff));
9482 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9485 if (!tg3_flag(tp, 5705_PLUS))
9487 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9492 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9496 if (!tg3_flag(tp, ENABLE_TSS)) {
9497 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9498 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9499 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9501 tw32(HOSTCC_TXCOL_TICKS, 0);
9502 tw32(HOSTCC_TXMAX_FRAMES, 0);
9503 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9505 for (; i < tp->txq_cnt; i++) {
9508 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9509 tw32(reg, ec->tx_coalesce_usecs);
9510 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9511 tw32(reg, ec->tx_max_coalesced_frames);
9512 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9513 tw32(reg, ec->tx_max_coalesced_frames_irq);
9517 for (; i < tp->irq_max - 1; i++) {
9518 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9519 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9520 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
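/* The per-vector coalescing registers are laid out at a fixed
 * 0x18-byte stride starting from the *_VEC1 addresses, which is why
 * both the tx init above and the rx init below index them as
 * base + i * 0x18.
 */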
9524 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9527 u32 limit = tp->rxq_cnt;
9529 if (!tg3_flag(tp, ENABLE_RSS)) {
9530 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9531 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9532 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9535 tw32(HOSTCC_RXCOL_TICKS, 0);
9536 tw32(HOSTCC_RXMAX_FRAMES, 0);
9537 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9540 for (; i < limit; i++) {
9543 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9544 tw32(reg, ec->rx_coalesce_usecs);
9545 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9546 tw32(reg, ec->rx_max_coalesced_frames);
9547 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9548 tw32(reg, ec->rx_max_coalesced_frames_irq);
9551 for (; i < tp->irq_max - 1; i++) {
9552 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9553 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9554 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9555 }
9556 }
9558 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9559 {
9560 tg3_coal_tx_init(tp, ec);
9561 tg3_coal_rx_init(tp, ec);
9563 if (!tg3_flag(tp, 5705_PLUS)) {
9564 u32 val = ec->stats_block_coalesce_usecs;
9566 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9567 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9569 if (!tp->link_up)
9570 val = 0;
9572 tw32(HOSTCC_STAT_COAL_TICKS, val);
9573 }
9574 }
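/* Illustrative sketch (not part of the driver): the shape of the
 * ethtool_coalesce parameters that __tg3_set_coalesce() above writes to
 * the host coalescing engine.  The values are made-up examples, not
 * driver defaults, and the helper name is hypothetical.
 */
static inline void tg3_example_fill_coal(struct ethtool_coalesce *ec)
{
	ec->rx_coalesce_usecs = 20;		/* HOSTCC_RXCOL_TICKS */
	ec->rx_max_coalesced_frames = 15;	/* HOSTCC_RXMAX_FRAMES */
	ec->tx_coalesce_usecs = 72;		/* HOSTCC_TXCOL_TICKS */
	ec->tx_max_coalesced_frames = 53;	/* HOSTCC_TXMAX_FRAMES */
	ec->stats_block_coalesce_usecs = 980;	/* HOSTCC_STAT_COAL_TICKS */
}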
9576 /* tp->lock is held. */
9577 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9578 {
9579 u32 txrcb, limit;
9581 /* Disable all transmit rings but the first. */
9582 if (!tg3_flag(tp, 5705_PLUS))
9583 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9584 else if (tg3_flag(tp, 5717_PLUS))
9585 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9586 else if (tg3_flag(tp, 57765_CLASS) ||
9587 tg3_asic_rev(tp) == ASIC_REV_5762)
9588 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9589 else
9590 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9592 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9593 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9594 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9595 BDINFO_FLAGS_DISABLED);
9596 }
9598 /* tp->lock is held. */
9599 static void tg3_tx_rcbs_init(struct tg3 *tp)
9600 {
9601 int i = 0;
9602 u32 txrcb = NIC_SRAM_SEND_RCB;
9604 if (tg3_flag(tp, ENABLE_TSS))
9605 i++;
9607 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9608 struct tg3_napi *tnapi = &tp->napi[i];
9610 if (!tnapi->tx_ring)
9611 continue;
9613 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9614 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9615 NIC_SRAM_TX_BUFFER_DESC);
9616 }
9617 }
9619 /* tp->lock is held. */
9620 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9621 {
9622 u32 rxrcb, limit;
9624 /* Disable all receive return rings but the first. */
9625 if (tg3_flag(tp, 5717_PLUS))
9626 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9627 else if (!tg3_flag(tp, 5705_PLUS))
9628 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9629 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9630 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9631 tg3_flag(tp, 57765_CLASS))
9632 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9633 else
9634 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9636 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9637 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9638 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9639 BDINFO_FLAGS_DISABLED);
9640 }
9642 /* tp->lock is held. */
9643 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9644 {
9645 int i = 0;
9646 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9648 if (tg3_flag(tp, ENABLE_RSS))
9649 i++;
9651 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9652 struct tg3_napi *tnapi = &tp->napi[i];
9654 if (!tnapi->rx_rcb)
9655 continue;
9657 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9658 (tp->rx_ret_ring_mask + 1) <<
9659 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9660 }
9661 }
9663 /* tp->lock is held. */
9664 static void tg3_rings_reset(struct tg3 *tp)
9665 {
9666 int i;
9667 u32 stblk;
9668 struct tg3_napi *tnapi = &tp->napi[0];
9670 tg3_tx_rcbs_disable(tp);
9672 tg3_rx_ret_rcbs_disable(tp);
9674 /* Disable interrupts */
9675 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9676 tp->napi[0].chk_msi_cnt = 0;
9677 tp->napi[0].last_rx_cons = 0;
9678 tp->napi[0].last_tx_cons = 0;
9680 /* Zero mailbox registers. */
9681 if (tg3_flag(tp, SUPPORT_MSIX)) {
9682 for (i = 1; i < tp->irq_max; i++) {
9683 tp->napi[i].tx_prod = 0;
9684 tp->napi[i].tx_cons = 0;
9685 if (tg3_flag(tp, ENABLE_TSS))
9686 tw32_mailbox(tp->napi[i].prodmbox, 0);
9687 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9688 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9689 tp->napi[i].chk_msi_cnt = 0;
9690 tp->napi[i].last_rx_cons = 0;
9691 tp->napi[i].last_tx_cons = 0;
9692 }
9693 if (!tg3_flag(tp, ENABLE_TSS))
9694 tw32_mailbox(tp->napi[0].prodmbox, 0);
9695 } else {
9696 tp->napi[0].tx_prod = 0;
9697 tp->napi[0].tx_cons = 0;
9698 tw32_mailbox(tp->napi[0].prodmbox, 0);
9699 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9700 }
9702 /* Make sure the NIC-based send BD rings are disabled. */
9703 if (!tg3_flag(tp, 5705_PLUS)) {
9704 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9705 for (i = 0; i < 16; i++)
9706 tw32_tx_mbox(mbox + i * 8, 0);
9707 }
9709 /* Clear status block in ram. */
9710 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9712 /* Set status block DMA address */
9713 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9714 ((u64) tnapi->status_mapping >> 32));
9715 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9716 ((u64) tnapi->status_mapping & 0xffffffff));
9718 stblk = HOSTCC_STATBLCK_RING1;
9720 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9721 u64 mapping = (u64)tnapi->status_mapping;
9722 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9723 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9724 stblk += 8;
9726 /* Clear status block in ram. */
9727 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9728 }
9730 tg3_tx_rcbs_init(tp);
9731 tg3_rx_ret_rcbs_init(tp);
9734 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9735 {
9736 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9738 if (!tg3_flag(tp, 5750_PLUS) ||
9739 tg3_flag(tp, 5780_CLASS) ||
9740 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9741 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9742 tg3_flag(tp, 57765_PLUS))
9743 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9744 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9745 tg3_asic_rev(tp) == ASIC_REV_5787)
9746 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9747 else
9748 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9750 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9751 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9753 val = min(nic_rep_thresh, host_rep_thresh);
9754 tw32(RCVBDI_STD_THRESH, val);
9756 if (tg3_flag(tp, 57765_PLUS))
9757 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9759 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9760 return;
9762 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9764 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9766 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9767 tw32(RCVBDI_JUMBO_THRESH, val);
9769 if (tg3_flag(tp, 57765_PLUS))
9770 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9771 }
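/* Worked example (illustrative): with rx_pending = 200 the host replenish
 * threshold above is max(200 / 8, 1) = 25 buffers, and the value written
 * to RCVBDI_STD_THRESH is min(bdcache_maxcnt / 2, 25).
 */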
9773 static inline u32 calc_crc(unsigned char *buf, int len)
9774 {
9775 u32 reg;
9776 u32 tmp;
9777 int j, k;
9779 reg = 0xffffffff;
9781 for (j = 0; j < len; j++) {
9782 reg ^= buf[j];
9784 for (k = 0; k < 8; k++) {
9785 tmp = reg & 0x01;
9787 reg >>= 1;
9789 if (tmp)
9790 reg ^= CRC32_POLY_LE;
9791 }
9792 }
9794 return ~reg;
9795 }
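/* Worked example (illustrative): calc_crc() clocks each byte LSB-first
 * through a CRC-32 shift register using the little-endian polynomial and
 * returns the inverted register, e.g. for the broadcast address.  The
 * helper name is hypothetical.
 */
static inline u32 tg3_example_bcast_crc(void)
{
	unsigned char bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return calc_crc(bcast, ETH_ALEN);
}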
9797 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9798 {
9799 /* accept or reject all multicast frames */
9800 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9801 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9802 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9803 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9804 }
9806 static void __tg3_set_rx_mode(struct net_device *dev)
9807 {
9808 struct tg3 *tp = netdev_priv(dev);
9809 u32 rx_mode;
9811 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9812 RX_MODE_KEEP_VLAN_TAG);
9814 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9815 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9816 * flag clear.
9817 */
9818 if (!tg3_flag(tp, ENABLE_ASF))
9819 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9820 #endif
9822 if (dev->flags & IFF_PROMISC) {
9823 /* Promiscuous mode. */
9824 rx_mode |= RX_MODE_PROMISC;
9825 } else if (dev->flags & IFF_ALLMULTI) {
9826 /* Accept all multicast. */
9827 tg3_set_multi(tp, 1);
9828 } else if (netdev_mc_empty(dev)) {
9829 /* Reject all multicast. */
9830 tg3_set_multi(tp, 0);
9831 } else {
9832 /* Accept one or more multicast(s). */
9833 struct netdev_hw_addr *ha;
9834 u32 mc_filter[4] = { 0, };
9835 u32 regidx;
9836 u32 bit;
9837 u32 crc;
9839 netdev_for_each_mc_addr(ha, dev) {
9840 crc = calc_crc(ha->addr, ETH_ALEN);
9841 bit = ~crc & 0x7f;
9842 regidx = (bit & 0x60) >> 5;
9843 bit &= 0x1f;
9844 mc_filter[regidx] |= (1 << bit);
9845 }
9847 tw32(MAC_HASH_REG_0, mc_filter[0]);
9848 tw32(MAC_HASH_REG_1, mc_filter[1]);
9849 tw32(MAC_HASH_REG_2, mc_filter[2]);
9850 tw32(MAC_HASH_REG_3, mc_filter[3]);
9851 }
9853 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9854 rx_mode |= RX_MODE_PROMISC;
9855 } else if (!(dev->flags & IFF_PROMISC)) {
9856 /* Add all entries into the mac addr filter list */
9857 int i = 0;
9858 struct netdev_hw_addr *ha;
9860 netdev_for_each_uc_addr(ha, dev) {
9861 __tg3_set_one_mac_addr(tp, ha->addr,
9862 i + TG3_UCAST_ADDR_IDX(tp));
9863 i++;
9864 }
9865 }
9867 if (rx_mode != tp->rx_mode) {
9868 tp->rx_mode = rx_mode;
9869 tw32_f(MAC_RX_MODE, rx_mode);
9870 udelay(10);
9871 }
9872 }
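/* Illustrative sketch (not part of the driver): how __tg3_set_rx_mode()
 * above maps a multicast address onto the 128-bit hash filter.  Bits 6..5
 * of the inverted CRC select one of the four MAC_HASH_REG_* registers and
 * bits 4..0 select the bit within it.  The helper name is hypothetical.
 */
static inline void tg3_example_mc_hash(unsigned char *addr, u32 *regidx,
				       u32 *bitpos)
{
	u32 bit = ~calc_crc(addr, ETH_ALEN) & 0x7f;

	*regidx = (bit & 0x60) >> 5;	/* which MAC_HASH_REG_0..3 */
	*bitpos = bit & 0x1f;		/* which of its 32 bits */
}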
9874 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9875 {
9876 int i;
9878 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9879 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9880 }
9882 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9883 {
9884 int i;
9886 if (!tg3_flag(tp, SUPPORT_MSIX))
9887 return;
9889 if (tp->rxq_cnt == 1) {
9890 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9891 return;
9892 }
9894 /* Validate table against current IRQ count */
9895 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9896 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9897 break;
9898 }
9900 if (i != TG3_RSS_INDIR_TBL_SIZE)
9901 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9902 }
9904 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9905 {
9906 int i = 0;
9907 u32 reg = MAC_RSS_INDIR_TBL_0;
9909 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9910 u32 val = tp->rss_ind_tbl[i];
9911 i++;
9912 for (; i % 8; i++) {
9913 val <<= 4;
9914 val |= tp->rss_ind_tbl[i];
9915 }
9916 tw32(reg, val);
9917 reg += 4;
9918 }
9919 }
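/* Worked example (illustrative): tg3_rss_write_indir_tbl() above packs
 * eight 4-bit queue indices into each 32-bit register, first entry in the
 * most significant nibble; the helper name is hypothetical.
 */
static inline u32 tg3_example_pack_indir(const u8 idx[8])
{
	u32 val = idx[0];
	int i;

	for (i = 1; i < 8; i++) {
		val <<= 4;
		val |= idx[i];
	}
	/* {1, 0, 3, 2, 1, 0, 3, 2} packs to 0x10321032 */
	return val;
}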
9921 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9922 {
9923 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9924 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9925 else
9926 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9927 }
9929 /* tp->lock is held. */
9930 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9931 {
9932 u32 val, rdmac_mode;
9933 int i, err, limit;
9934 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9936 tg3_disable_ints(tp);
9938 tg3_stop_fw(tp);
9940 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9942 if (tg3_flag(tp, INIT_COMPLETE))
9943 tg3_abort_hw(tp, 1);
9945 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9946 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9947 tg3_phy_pull_config(tp);
9948 tg3_eee_pull_config(tp, NULL);
9949 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9950 }
9952 /* Enable MAC control of LPI */
9953 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9954 tg3_setup_eee(tp);
9956 if (reset_phy)
9957 tg3_phy_reset(tp);
9959 err = tg3_chip_reset(tp);
9960 if (err)
9961 return err;
9963 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9965 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9966 val = tr32(TG3_CPMU_CTRL);
9967 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9968 tw32(TG3_CPMU_CTRL, val);
9970 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9971 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9972 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9973 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9975 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9976 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9977 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9978 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9980 val = tr32(TG3_CPMU_HST_ACC);
9981 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9982 val |= CPMU_HST_ACC_MACCLK_6_25;
9983 tw32(TG3_CPMU_HST_ACC, val);
9984 }
9986 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9987 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9988 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9989 PCIE_PWR_MGMT_L1_THRESH_4MS;
9990 tw32(PCIE_PWR_MGMT_THRESH, val);
9992 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9993 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9995 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9997 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9998 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9999 }
10001 if (tg3_flag(tp, L1PLLPD_EN)) {
10002 u32 grc_mode = tr32(GRC_MODE);
10004 /* Access the lower 1K of PL PCIE block registers. */
10005 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10006 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10008 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
10009 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
10010 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
10012 tw32(GRC_MODE, grc_mode);
10013 }
10015 if (tg3_flag(tp, 57765_CLASS)) {
10016 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
10017 u32 grc_mode = tr32(GRC_MODE);
10019 /* Access the lower 1K of PL PCIE block registers. */
10020 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10021 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10023 val = tr32(TG3_PCIE_TLDLPL_PORT +
10024 TG3_PCIE_PL_LO_PHYCTL5);
10025 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
10026 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
10028 tw32(GRC_MODE, grc_mode);
10029 }
10031 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
10032 u32 grc_mode;
10034 /* Fix transmit hangs */
10035 val = tr32(TG3_CPMU_PADRNG_CTL);
10036 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
10037 tw32(TG3_CPMU_PADRNG_CTL, val);
10039 grc_mode = tr32(GRC_MODE);
10041 /* Access the lower 1K of DL PCIE block registers. */
10042 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10043 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
10045 val = tr32(TG3_PCIE_TLDLPL_PORT +
10046 TG3_PCIE_DL_LO_FTSMAX);
10047 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
10048 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
10049 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
10051 tw32(GRC_MODE, grc_mode);
10052 }
10054 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
10055 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
10056 val |= CPMU_LSPD_10MB_MACCLK_6_25;
10057 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10058 }
10060 /* This works around an issue with Athlon chipsets on
10061 * B3 tigon3 silicon. This bit has no effect on any
10062 * other revision. But do not set this on PCI Express
10063 * chips and don't even touch the clocks if the CPMU is present.
10064 */
10065 if (!tg3_flag(tp, CPMU_PRESENT)) {
10066 if (!tg3_flag(tp, PCI_EXPRESS))
10067 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10068 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10069 }
10071 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10072 tg3_flag(tp, PCIX_MODE)) {
10073 val = tr32(TG3PCI_PCISTATE);
10074 val |= PCISTATE_RETRY_SAME_DMA;
10075 tw32(TG3PCI_PCISTATE, val);
10076 }
10078 if (tg3_flag(tp, ENABLE_APE)) {
10079 /* Allow reads and writes to the
10080 * APE register and memory space.
10081 */
10082 val = tr32(TG3PCI_PCISTATE);
10083 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10084 PCISTATE_ALLOW_APE_SHMEM_WR |
10085 PCISTATE_ALLOW_APE_PSPACE_WR;
10086 tw32(TG3PCI_PCISTATE, val);
10087 }
10089 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10090 /* Enable some hw fixes. */
10091 val = tr32(TG3PCI_MSI_DATA);
10092 val |= (1 << 26) | (1 << 28) | (1 << 29);
10093 tw32(TG3PCI_MSI_DATA, val);
10094 }
10096 /* Descriptor ring init may make accesses to the
10097 * NIC SRAM area to setup the TX descriptors, so we
10098 * can only do this after the hardware has been
10099 * successfully reset.
10100 */
10101 err = tg3_init_rings(tp);
10102 if (err)
10103 return err;
10105 if (tg3_flag(tp, 57765_PLUS)) {
10106 val = tr32(TG3PCI_DMA_RW_CTRL) &
10107 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10108 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10109 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10110 if (!tg3_flag(tp, 57765_CLASS) &&
10111 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10112 tg3_asic_rev(tp) != ASIC_REV_5762)
10113 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10114 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10115 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10116 tg3_asic_rev(tp) != ASIC_REV_5761) {
10117 /* This value is determined during the probe time DMA
10118 * engine test, tg3_test_dma.
10119 */
10120 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10121 }
10123 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10124 GRC_MODE_4X_NIC_SEND_RINGS |
10125 GRC_MODE_NO_TX_PHDR_CSUM |
10126 GRC_MODE_NO_RX_PHDR_CSUM);
10127 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10129 /* Pseudo-header checksum is done by hardware logic and not
10130 * the offload processers, so make the chip do the pseudo-
10131 * header checksums on receive. For transmit it is more
10132 * convenient to do the pseudo-header checksum in software
10133 * as Linux does that on transmit for us in all cases.
10134 */
10135 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10137 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10138 if (tp->rxptpctl)
10139 tw32(TG3_RX_PTP_CTL,
10140 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10142 if (tg3_flag(tp, PTP_CAPABLE))
10143 val |= GRC_MODE_TIME_SYNC_ENABLE;
10145 tw32(GRC_MODE, tp->grc_mode | val);
10147 /* On one of the AMD platforms, MRRS is restricted to 4000 because of
10148 * a south bridge limitation. As a workaround, the driver sets MRRS
10149 * to 2048 instead of the default 4096.
10150 */
10151 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10152 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10153 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10154 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10155 }
10157 /* Setup the timer prescaler register. Clock is always 66MHz. */
10158 val = tr32(GRC_MISC_CFG);
10159 val &= ~0xff;
10160 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10161 tw32(GRC_MISC_CFG, val);
10163 /* Initialize MBUF/DESC pool. */
10164 if (tg3_flag(tp, 5750_PLUS)) {
10165 /* Do nothing. */
10166 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10167 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10168 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10169 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10170 else
10171 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10172 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10173 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10174 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10175 int fw_len;
10177 fw_len = tp->fw_len;
10178 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10179 tw32(BUFMGR_MB_POOL_ADDR,
10180 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10181 tw32(BUFMGR_MB_POOL_SIZE,
10182 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10183 }
10185 if (tp->dev->mtu <= ETH_DATA_LEN) {
10186 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10187 tp->bufmgr_config.mbuf_read_dma_low_water);
10188 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10189 tp->bufmgr_config.mbuf_mac_rx_low_water);
10190 tw32(BUFMGR_MB_HIGH_WATER,
10191 tp->bufmgr_config.mbuf_high_water);
10192 } else {
10193 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10194 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10195 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10196 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10197 tw32(BUFMGR_MB_HIGH_WATER,
10198 tp->bufmgr_config.mbuf_high_water_jumbo);
10199 }
10200 tw32(BUFMGR_DMA_LOW_WATER,
10201 tp->bufmgr_config.dma_low_water);
10202 tw32(BUFMGR_DMA_HIGH_WATER,
10203 tp->bufmgr_config.dma_high_water);
10205 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10206 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10207 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10208 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10209 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10210 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10211 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10212 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10213 tw32(BUFMGR_MODE, val);
10214 for (i = 0; i < 2000; i++) {
10215 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10216 break;
10217 udelay(10);
10218 }
10219 if (i >= 2000) {
10220 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10221 return -ENODEV;
10222 }
10224 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10225 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10227 tg3_setup_rxbd_thresholds(tp);
10229 /* Initialize TG3_BDINFO's at:
10230 * RCVDBDI_STD_BD: standard eth size rx ring
10231 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10232 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10233 *
10234 * like so:
10235 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10236 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10237 * ring attribute flags
10238 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10240 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10241 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10243 * The size of each ring is fixed in the firmware, but the location is
10244 * configurable.
10245 */
10246 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10247 ((u64) tpr->rx_std_mapping >> 32));
10248 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10249 ((u64) tpr->rx_std_mapping & 0xffffffff));
10250 if (!tg3_flag(tp, 5717_PLUS))
10251 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10252 NIC_SRAM_RX_BUFFER_DESC);
10254 /* Disable the mini ring */
10255 if (!tg3_flag(tp, 5705_PLUS))
10256 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10257 BDINFO_FLAGS_DISABLED);
10259 /* Program the jumbo buffer descriptor ring control
10260 * blocks on those devices that have them.
10261 */
10262 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10263 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10265 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10266 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10267 ((u64) tpr->rx_jmb_mapping >> 32));
10268 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10269 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10270 val = TG3_RX_JMB_RING_SIZE(tp) <<
10271 BDINFO_FLAGS_MAXLEN_SHIFT;
10272 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10273 val | BDINFO_FLAGS_USE_EXT_RECV);
10274 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10275 tg3_flag(tp, 57765_CLASS) ||
10276 tg3_asic_rev(tp) == ASIC_REV_5762)
10277 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10278 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10279 } else {
10280 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10281 BDINFO_FLAGS_DISABLED);
10282 }
10284 if (tg3_flag(tp, 57765_PLUS)) {
10285 val = TG3_RX_STD_RING_SIZE(tp);
10286 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10287 val |= (TG3_RX_STD_DMA_SZ << 2);
10288 } else
10289 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10290 } else
10291 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10293 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10295 tpr->rx_std_prod_idx = tp->rx_pending;
10296 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10298 tpr->rx_jmb_prod_idx =
10299 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10300 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10302 tg3_rings_reset(tp);
10304 /* Initialize MAC address and backoff seed. */
10305 __tg3_set_mac_addr(tp, false);
10307 /* MTU + ethernet header + FCS + optional VLAN tag */
10308 tw32(MAC_RX_MTU_SIZE,
10309 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10311 /* The slot time is changed by tg3_setup_phy if we
10312 * run at gigabit with half duplex.
10313 */
10314 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10315 (6 << TX_LENGTHS_IPG_SHIFT) |
10316 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10318 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10319 tg3_asic_rev(tp) == ASIC_REV_5762)
10320 val |= tr32(MAC_TX_LENGTHS) &
10321 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10322 TX_LENGTHS_CNT_DWN_VAL_MSK);
10324 tw32(MAC_TX_LENGTHS, val);
10326 /* Receive rules. */
10327 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10328 tw32(RCVLPC_CONFIG, 0x0181);
10330 /* Calculate RDMAC_MODE setting early, we need it to determine
10331 * the RCVLPC_STATE_ENABLE mask.
10332 */
10333 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10334 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10335 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10336 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10337 RDMAC_MODE_LNGREAD_ENAB);
10339 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10340 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10342 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10343 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10344 tg3_asic_rev(tp) == ASIC_REV_57780)
10345 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10346 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10347 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10349 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10350 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10351 if (tg3_flag(tp, TSO_CAPABLE)) {
10352 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10353 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10354 !tg3_flag(tp, IS_5788)) {
10355 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10356 }
10357 }
10359 if (tg3_flag(tp, PCI_EXPRESS))
10360 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10362 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10363 tp->dma_limit = 0;
10364 if (tp->dev->mtu <= ETH_DATA_LEN) {
10365 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10366 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10367 }
10368 }
10370 if (tg3_flag(tp, HW_TSO_1) ||
10371 tg3_flag(tp, HW_TSO_2) ||
10372 tg3_flag(tp, HW_TSO_3))
10373 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10375 if (tg3_flag(tp, 57765_PLUS) ||
10376 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10377 tg3_asic_rev(tp) == ASIC_REV_57780)
10378 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10380 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10381 tg3_asic_rev(tp) == ASIC_REV_5762)
10382 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10384 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10385 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10386 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10387 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10388 tg3_flag(tp, 57765_PLUS)) {
10389 u32 tgtreg;
10391 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10392 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10393 else
10394 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10396 val = tr32(tgtreg);
10397 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10398 tg3_asic_rev(tp) == ASIC_REV_5762) {
10399 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10400 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10401 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10402 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10403 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10404 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10405 }
10406 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10407 }
10409 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10410 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10411 tg3_asic_rev(tp) == ASIC_REV_5762) {
10412 u32 tgtreg;
10414 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10415 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10416 else
10417 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10419 val = tr32(tgtreg);
10420 tw32(tgtreg, val |
10421 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10422 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10423 }
10425 /* Receive/send statistics. */
10426 if (tg3_flag(tp, 5750_PLUS)) {
10427 val = tr32(RCVLPC_STATS_ENABLE);
10428 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10429 tw32(RCVLPC_STATS_ENABLE, val);
10430 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10431 tg3_flag(tp, TSO_CAPABLE)) {
10432 val = tr32(RCVLPC_STATS_ENABLE);
10433 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10434 tw32(RCVLPC_STATS_ENABLE, val);
10435 } else {
10436 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10437 }
10438 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10439 tw32(SNDDATAI_STATSENAB, 0xffffff);
10440 tw32(SNDDATAI_STATSCTRL,
10441 (SNDDATAI_SCTRL_ENABLE |
10442 SNDDATAI_SCTRL_FASTUPD));
10444 /* Setup host coalescing engine. */
10445 tw32(HOSTCC_MODE, 0);
10446 for (i = 0; i < 2000; i++) {
10447 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10448 break;
10449 udelay(10);
10450 }
10452 __tg3_set_coalesce(tp, &tp->coal);
10454 if (!tg3_flag(tp, 5705_PLUS)) {
10455 /* Status/statistics block address. See tg3_timer,
10456 * the tg3_periodic_fetch_stats call there, and
10457 * tg3_get_stats to see how this works for 5705/5750 chips.
10458 */
10459 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10460 ((u64) tp->stats_mapping >> 32));
10461 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10462 ((u64) tp->stats_mapping & 0xffffffff));
10463 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10465 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10467 /* Clear statistics and status block memory areas */
10468 for (i = NIC_SRAM_STATS_BLK;
10469 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10470 i += sizeof(u32)) {
10471 tg3_write_mem(tp, i, 0);
10472 udelay(40);
10473 }
10474 }
10476 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10478 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10479 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10480 if (!tg3_flag(tp, 5705_PLUS))
10481 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10483 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10484 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10485 /* reset to prevent losing 1st rx packet intermittently */
10486 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10487 udelay(10);
10488 }
10490 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10491 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10492 MAC_MODE_FHDE_ENABLE;
10493 if (tg3_flag(tp, ENABLE_APE))
10494 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10495 if (!tg3_flag(tp, 5705_PLUS) &&
10496 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10497 tg3_asic_rev(tp) != ASIC_REV_5700)
10498 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10499 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10500 udelay(40);
10502 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10503 * If TG3_FLAG_IS_NIC is zero, we should read the
10504 * register to preserve the GPIO settings for LOMs. The GPIOs,
10505 * whether used as inputs or outputs, are set by boot code after
10506 * reset.
10507 */
10508 if (!tg3_flag(tp, IS_NIC)) {
10509 u32 gpio_mask;
10511 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10512 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10513 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10515 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10516 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10517 GRC_LCLCTRL_GPIO_OUTPUT3;
10519 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10520 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10522 tp->grc_local_ctrl &= ~gpio_mask;
10523 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10525 /* GPIO1 must be driven high for eeprom write protect */
10526 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10527 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10528 GRC_LCLCTRL_GPIO_OUTPUT1);
10529 }
10530 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10531 udelay(100);
10533 if (tg3_flag(tp, USING_MSIX)) {
10534 val = tr32(MSGINT_MODE);
10535 val |= MSGINT_MODE_ENABLE;
10536 if (tp->irq_cnt > 1)
10537 val |= MSGINT_MODE_MULTIVEC_EN;
10538 if (!tg3_flag(tp, 1SHOT_MSI))
10539 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10540 tw32(MSGINT_MODE, val);
10541 }
10543 if (!tg3_flag(tp, 5705_PLUS)) {
10544 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10545 udelay(40);
10546 }
10548 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10549 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10550 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10551 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10552 WDMAC_MODE_LNGREAD_ENAB);
10554 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10555 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10556 if (tg3_flag(tp, TSO_CAPABLE) &&
10557 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10558 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10559 /* nothing */
10560 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10561 !tg3_flag(tp, IS_5788)) {
10562 val |= WDMAC_MODE_RX_ACCEL;
10563 }
10564 }
10566 /* Enable host coalescing bug fix */
10567 if (tg3_flag(tp, 5755_PLUS))
10568 val |= WDMAC_MODE_STATUS_TAG_FIX;
10570 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10571 val |= WDMAC_MODE_BURST_ALL_DATA;
10573 tw32_f(WDMAC_MODE, val);
10574 udelay(40);
10576 if (tg3_flag(tp, PCIX_MODE)) {
10577 u16 pcix_cmd;
10579 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10580 &pcix_cmd);
10581 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10582 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10583 pcix_cmd |= PCI_X_CMD_READ_2K;
10584 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10585 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10586 pcix_cmd |= PCI_X_CMD_READ_2K;
10587 }
10588 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10589 pcix_cmd);
10590 }
10592 tw32_f(RDMAC_MODE, rdmac_mode);
10593 udelay(40);
10595 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10596 tg3_asic_rev(tp) == ASIC_REV_5720) {
10597 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10598 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10599 break;
10600 }
10601 if (i < TG3_NUM_RDMA_CHANNELS) {
10602 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10603 val |= tg3_lso_rd_dma_workaround_bit(tp);
10604 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10605 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10606 }
10607 }
10609 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10610 if (!tg3_flag(tp, 5705_PLUS))
10611 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10613 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10614 tw32(SNDDATAC_MODE,
10615 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10616 else
10617 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10619 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10620 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10621 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10622 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10623 val |= RCVDBDI_MODE_LRG_RING_SZ;
10624 tw32(RCVDBDI_MODE, val);
10625 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10626 if (tg3_flag(tp, HW_TSO_1) ||
10627 tg3_flag(tp, HW_TSO_2) ||
10628 tg3_flag(tp, HW_TSO_3))
10629 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10630 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10631 if (tg3_flag(tp, ENABLE_TSS))
10632 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10633 tw32(SNDBDI_MODE, val);
10634 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10636 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10637 err = tg3_load_5701_a0_firmware_fix(tp);
10638 if (err)
10639 return err;
10640 }
10642 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10643 /* Ignore any errors for the firmware download. If download
10644 * fails, the device will operate with EEE disabled.
10645 */
10646 tg3_load_57766_firmware(tp);
10647 }
10649 if (tg3_flag(tp, TSO_CAPABLE)) {
10650 err = tg3_load_tso_firmware(tp);
10651 if (err)
10652 return err;
10653 }
10655 tp->tx_mode = TX_MODE_ENABLE;
10657 if (tg3_flag(tp, 5755_PLUS) ||
10658 tg3_asic_rev(tp) == ASIC_REV_5906)
10659 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10661 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10662 tg3_asic_rev(tp) == ASIC_REV_5762) {
10663 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10664 tp->tx_mode &= ~val;
10665 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10666 }
10668 tw32_f(MAC_TX_MODE, tp->tx_mode);
10669 udelay(100);
10671 if (tg3_flag(tp, ENABLE_RSS)) {
10672 u32 rss_key[10];
10674 tg3_rss_write_indir_tbl(tp);
10676 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10678 for (i = 0; i < 10 ; i++)
10679 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10680 }
10682 tp->rx_mode = RX_MODE_ENABLE;
10683 if (tg3_flag(tp, 5755_PLUS))
10684 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10686 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10687 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10689 if (tg3_flag(tp, ENABLE_RSS))
10690 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10691 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10692 RX_MODE_RSS_IPV6_HASH_EN |
10693 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10694 RX_MODE_RSS_IPV4_HASH_EN |
10695 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10697 tw32_f(MAC_RX_MODE, tp->rx_mode);
10698 udelay(10);
10700 tw32(MAC_LED_CTRL, tp->led_ctrl);
10702 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10703 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10704 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10705 udelay(10);
10706 }
10707 tw32_f(MAC_RX_MODE, tp->rx_mode);
10708 udelay(10);
10710 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10711 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10712 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10713 /* Set drive transmission level to 1.2V */
10714 /* only if the signal pre-emphasis bit is not set */
10715 val = tr32(MAC_SERDES_CFG);
10716 val &= 0xfffff000;
10717 val |= 0x880;
10718 tw32(MAC_SERDES_CFG, val);
10719 }
10720 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10721 tw32(MAC_SERDES_CFG, 0x616000);
10722 }
10724 /* Prevent chip from dropping frames when flow control
10725 * is enabled.
10726 */
10727 if (tg3_flag(tp, 57765_CLASS))
10728 val = 1;
10729 else
10730 val = 2;
10731 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10733 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10734 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10735 /* Use hardware link auto-negotiation */
10736 tg3_flag_set(tp, HW_AUTONEG);
10737 }
10739 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10740 tg3_asic_rev(tp) == ASIC_REV_5714) {
10741 u32 tmp;
10743 tmp = tr32(SERDES_RX_CTRL);
10744 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10745 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10746 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10747 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10748 }
10750 if (!tg3_flag(tp, USE_PHYLIB)) {
10751 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10752 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10754 err = tg3_setup_phy(tp, false);
10755 if (err)
10756 return err;
10758 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10759 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10760 u32 tmp;
10762 /* Clear CRC stats. */
10763 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10764 tg3_writephy(tp, MII_TG3_TEST1,
10765 tmp | MII_TG3_TEST1_CRC_EN);
10766 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10767 }
10768 }
10771 __tg3_set_rx_mode(tp->dev);
10773 /* Initialize receive rules. */
10774 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10775 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10776 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10777 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10779 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10780 limit = 8;
10781 else
10782 limit = 16;
10783 if (tg3_flag(tp, ENABLE_ASF))
10784 limit -= 4;
10785 switch (limit) {
10786 case 16:
10787 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10788 fallthrough;
10789 case 15:
10790 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10791 fallthrough;
10792 case 14:
10793 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10794 fallthrough;
10795 case 13:
10796 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10797 fallthrough;
10798 case 12:
10799 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10800 fallthrough;
10801 case 11:
10802 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10803 fallthrough;
10804 case 10:
10805 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10806 fallthrough;
10807 case 9:
10808 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10809 fallthrough;
10810 case 8:
10811 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10812 fallthrough;
10813 case 7:
10814 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10815 fallthrough;
10816 case 6:
10817 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10818 fallthrough;
10819 case 5:
10820 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10821 fallthrough;
10822 case 4:
10823 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10824 case 3:
10825 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10826 case 2:
10827 case 1:
10828 default:
10829 break;
10830 }
10833 if (tg3_flag(tp, ENABLE_APE))
10834 /* Write our heartbeat update interval to APE. */
10835 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10836 APE_HOST_HEARTBEAT_INT_5SEC);
10838 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10840 return 0;
10841 }
10843 /* Called at device open time to get the chip ready for
10844 * packet processing. Invoked with tp->lock held.
10845 */
10846 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10847 {
10848 /* Chip may have been just powered on. If so, the boot code may still
10849 * be running initialization. Wait for it to finish to avoid races in
10850 * accessing the hardware.
10851 */
10852 tg3_enable_register_access(tp);
10853 tg3_poll_fw(tp);
10855 tg3_switch_clocks(tp);
10857 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10859 return tg3_reset_hw(tp, reset_phy);
10860 }
10862 #ifdef CONFIG_TIGON3_HWMON
10863 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10864 {
10865 u32 off, len = TG3_OCIR_LEN;
10866 int i;
10868 for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10869 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10871 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10872 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10873 memset(ocir, 0, len);
10874 }
10875 }
10877 /* sysfs attributes for hwmon */
10878 static ssize_t tg3_show_temp(struct device *dev,
10879 struct device_attribute *devattr, char *buf)
10880 {
10881 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10882 struct tg3 *tp = dev_get_drvdata(dev);
10883 u32 temperature;
10885 spin_lock_bh(&tp->lock);
10886 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10887 sizeof(temperature));
10888 spin_unlock_bh(&tp->lock);
10889 return sprintf(buf, "%u\n", temperature * 1000);
10890 }
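/* The APE scratchpad reports the temperature in degrees Celsius; the
 * "* 1000" above converts it to the millidegree units the hwmon sysfs
 * ABI expects, so for example a reading of 58 is shown as "58000".
 */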
10893 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10894 TG3_TEMP_SENSOR_OFFSET);
10895 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10896 TG3_TEMP_CAUTION_OFFSET);
10897 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10898 TG3_TEMP_MAX_OFFSET);
10900 static struct attribute *tg3_attrs[] = {
10901 &sensor_dev_attr_temp1_input.dev_attr.attr,
10902 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10903 &sensor_dev_attr_temp1_max.dev_attr.attr,
10904 NULL
10905 };
10906 ATTRIBUTE_GROUPS(tg3);
10908 static void tg3_hwmon_close(struct tg3 *tp)
10909 {
10910 if (tp->hwmon_dev) {
10911 hwmon_device_unregister(tp->hwmon_dev);
10912 tp->hwmon_dev = NULL;
10913 }
10914 }
10916 static void tg3_hwmon_open(struct tg3 *tp)
10917 {
10918 int i;
10919 u32 size = 0;
10920 struct pci_dev *pdev = tp->pdev;
10921 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10923 tg3_sd_scan_scratchpad(tp, ocirs);
10925 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10926 if (!ocirs[i].src_data_length)
10927 continue;
10929 size += ocirs[i].src_hdr_length;
10930 size += ocirs[i].src_data_length;
10931 }
10933 if (!size)
10934 return;
10936 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10937 tp, tg3_groups);
10938 if (IS_ERR(tp->hwmon_dev)) {
10939 tp->hwmon_dev = NULL;
10940 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10941 }
10942 }
10943 #else
10944 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10945 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10946 #endif /* CONFIG_TIGON3_HWMON */
10949 #define TG3_STAT_ADD32(PSTAT, REG) \
10950 do { u32 __val = tr32(REG); \
10951 (PSTAT)->low += __val; \
10952 if ((PSTAT)->low < __val) \
10953 (PSTAT)->high += 1; \
10954 } while (0)
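/* Worked example (illustrative): TG3_STAT_ADD32() folds a 32-bit
 * clear-on-read hardware counter into a 64-bit software counter; the
 * helper name below is hypothetical.
 */
static inline void tg3_example_stat_add32(u32 *high, u32 *low, u32 val)
{
	*low += val;
	if (*low < val)		/* unsigned wraparound implies a carry */
		*high += 1;
}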
10956 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10957 {
10958 struct tg3_hw_stats *sp = tp->hw_stats;
10960 if (!tp->link_up || tg3_flag(tp, RESET_TASK_PENDING))
10961 return;
10963 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10964 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10965 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10966 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10967 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10968 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10969 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10970 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10971 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10972 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10973 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10974 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10975 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10976 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10977 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10978 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10979 u32 val;
10981 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10982 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10983 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10984 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10985 }
10987 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10988 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10989 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10990 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10991 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10992 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10993 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10994 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10995 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10996 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10997 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10998 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10999 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
11000 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
11002 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
11003 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
11004 tg3_asic_rev(tp) != ASIC_REV_5762 &&
11005 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
11006 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
11007 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
11008 } else {
11009 u32 val = tr32(HOSTCC_FLOW_ATTN);
11010 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
11011 if (val)
11012 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
11013 sp->rx_discards.low += val;
11014 if (sp->rx_discards.low < val)
11015 sp->rx_discards.high += 1;
11016 }
11017 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
11019 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
11020 }
11022 static void tg3_chk_missed_msi(struct tg3 *tp)
11023 {
11024 u32 i;
11026 for (i = 0; i < tp->irq_cnt; i++) {
11027 struct tg3_napi *tnapi = &tp->napi[i];
11029 if (tg3_has_work(tnapi)) {
11030 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
11031 tnapi->last_tx_cons == tnapi->tx_cons) {
11032 if (tnapi->chk_msi_cnt < 1) {
11033 tnapi->chk_msi_cnt++;
11034 return;
11035 }
11036 tg3_msi(0, tnapi);
11037 }
11038 }
11039 tnapi->chk_msi_cnt = 0;
11040 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
11041 tnapi->last_tx_cons = tnapi->tx_cons;
11042 }
11043 }
11045 static void tg3_timer(struct timer_list *t)
11046 {
11047 struct tg3 *tp = from_timer(tp, t, timer);
11049 spin_lock(&tp->lock);
11051 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
11052 spin_unlock(&tp->lock);
11053 goto restart_timer;
11054 }
11056 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
11057 tg3_flag(tp, 57765_CLASS))
11058 tg3_chk_missed_msi(tp);
11060 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
11061 /* BCM4785: Flush posted writes from GbE to host memory. */
11062 tr32(HOSTCC_MODE);
11063 }
11065 if (!tg3_flag(tp, TAGGED_STATUS)) {
11066 /* All of this garbage is because when using non-tagged
11067 * IRQ status, the mailbox/status_block protocol the chip
11068 * uses with the cpu is race prone.
11069 */
11070 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11071 tw32(GRC_LOCAL_CTRL,
11072 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11073 } else {
11074 tw32(HOSTCC_MODE, tp->coalesce_mode |
11075 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11076 }
11078 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11079 spin_unlock(&tp->lock);
11080 tg3_reset_task_schedule(tp);
11081 goto restart_timer;
11082 }
11083 }
11085 /* This part only runs once per second. */
11086 if (!--tp->timer_counter) {
11087 if (tg3_flag(tp, 5705_PLUS))
11088 tg3_periodic_fetch_stats(tp);
11090 if (tp->setlpicnt && !--tp->setlpicnt)
11091 tg3_phy_eee_enable(tp);
11093 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11094 u32 mac_stat;
11095 int phy_event;
11097 mac_stat = tr32(MAC_STATUS);
11099 phy_event = 0;
11100 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11101 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11102 phy_event = 1;
11103 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11104 phy_event = 1;
11106 if (phy_event)
11107 tg3_setup_phy(tp, false);
11108 } else if (tg3_flag(tp, POLL_SERDES)) {
11109 u32 mac_stat = tr32(MAC_STATUS);
11110 int need_setup = 0;
11112 if (tp->link_up &&
11113 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11114 need_setup = 1;
11115 }
11116 if (!tp->link_up &&
11117 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11118 MAC_STATUS_SIGNAL_DET))) {
11119 need_setup = 1;
11120 }
11121 if (need_setup) {
11122 if (!tp->serdes_counter) {
11123 tw32_f(MAC_MODE,
11124 (tp->mac_mode &
11125 ~MAC_MODE_PORT_MODE_MASK));
11126 udelay(40);
11127 tw32_f(MAC_MODE, tp->mac_mode);
11128 udelay(40);
11129 }
11130 tg3_setup_phy(tp, false);
11131 }
11132 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11133 tg3_flag(tp, 5780_CLASS)) {
11134 tg3_serdes_parallel_detect(tp);
11135 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11136 u32 cpmu = tr32(TG3_CPMU_STATUS);
11137 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11138 TG3_CPMU_STATUS_LINK_MASK);
11140 if (link_up != tp->link_up)
11141 tg3_setup_phy(tp, false);
11142 }
11144 tp->timer_counter = tp->timer_multiplier;
11145 }
11147 /* Heartbeat is only sent once every 2 seconds.
11148 *
11149 * The heartbeat is to tell the ASF firmware that the host
11150 * driver is still alive. In the event that the OS crashes,
11151 * ASF needs to reset the hardware to free up the FIFO space
11152 * that may be filled with rx packets destined for the host.
11153 * If the FIFO is full, ASF will no longer function properly.
11154 *
11155 * Unintended resets have been reported on real time kernels
11156 * where the timer doesn't run on time. Netpoll will also have
11157 * the same problem.
11158 *
11159 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11160 * to check the ring condition when the heartbeat is expiring
11161 * before doing the reset. This will prevent most unintended
11162 * resets.
11163 */
11164 if (!--tp->asf_counter) {
11165 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11166 tg3_wait_for_event_ack(tp);
11168 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11169 FWCMD_NICDRV_ALIVE3);
11170 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11171 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11172 TG3_FW_UPDATE_TIMEOUT_SEC);
11174 tg3_generate_fw_event(tp);
11175 }
11176 tp->asf_counter = tp->asf_multiplier;
11177 }
11179 /* Update the APE heartbeat every 5 seconds. */
11180 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11182 spin_unlock(&tp->lock);
11184 restart_timer:
11185 tp->timer.expires = jiffies + tp->timer_offset;
11186 add_timer(&tp->timer);
11187 }
11189 static void tg3_timer_init(struct tg3 *tp)
11190 {
11191 if (tg3_flag(tp, TAGGED_STATUS) &&
11192 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11193 !tg3_flag(tp, 57765_CLASS))
11194 tp->timer_offset = HZ;
11195 else
11196 tp->timer_offset = HZ / 10;
11198 BUG_ON(tp->timer_offset > HZ);
11200 tp->timer_multiplier = (HZ / tp->timer_offset);
11201 tp->asf_multiplier = (HZ / tp->timer_offset) *
11202 TG3_FW_UPDATE_FREQ_SEC;
11204 timer_setup(&tp->timer, tg3_timer, 0);
11205 }
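/* Worked example (illustrative): with tagged status on a chip that needs
 * no missed-MSI check, timer_offset is HZ, so the timer fires once per
 * second and timer_multiplier is HZ / HZ = 1.  Otherwise it fires every
 * HZ / 10 jiffies with timer_multiplier = 10, so the once-per-second work
 * in tg3_timer() still runs at the same rate, and asf_multiplier spaces
 * heartbeats TG3_FW_UPDATE_FREQ_SEC seconds apart.
 */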
11207 static void tg3_timer_start(struct tg3 *tp)
11208 {
11209 tp->asf_counter = tp->asf_multiplier;
11210 tp->timer_counter = tp->timer_multiplier;
11212 tp->timer.expires = jiffies + tp->timer_offset;
11213 add_timer(&tp->timer);
11214 }
11216 static void tg3_timer_stop(struct tg3 *tp)
11217 {
11218 del_timer_sync(&tp->timer);
11219 }
11221 /* Restart hardware after configuration changes, self-test, etc.
11222 * Invoked with tp->lock held.
11223 */
11224 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11225 __releases(tp->lock)
11226 __acquires(tp->lock)
11227 {
11228 int err;
11230 err = tg3_init_hw(tp, reset_phy);
11231 if (err) {
11232 netdev_err(tp->dev,
11233 "Failed to re-initialize device, aborting\n");
11234 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11235 tg3_full_unlock(tp);
11236 tg3_timer_stop(tp);
11237 tp->irq_sync = 0;
11238 tg3_napi_enable(tp);
11239 dev_close(tp->dev);
11240 tg3_full_lock(tp, 0);
11241 }
11242 return err;
11243 }
11245 static void tg3_reset_task(struct work_struct *work)
11246 {
11247 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11248 int err;
11250 rtnl_lock();
11251 tg3_full_lock(tp, 0);
11253 if (tp->pcierr_recovery || !netif_running(tp->dev)) {
11254 tg3_flag_clear(tp, RESET_TASK_PENDING);
11255 tg3_full_unlock(tp);
11256 rtnl_unlock();
11257 return;
11258 }
11260 tg3_full_unlock(tp);
11262 tg3_phy_stop(tp);
11264 tg3_netif_stop(tp);
11266 tg3_full_lock(tp, 1);
11268 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11269 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11270 tp->write32_rx_mbox = tg3_write_flush_reg32;
11271 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11272 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11273 }
11275 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11276 err = tg3_init_hw(tp, true);
11277 if (err) {
11278 tg3_full_unlock(tp);
11279 tp->irq_sync = 0;
11280 tg3_napi_enable(tp);
11281 /* Clear this flag so that tg3_reset_task_cancel() will not
11282 * call cancel_work_sync() and wait forever.
11283 */
11284 tg3_flag_clear(tp, RESET_TASK_PENDING);
11285 dev_close(tp->dev);
11286 goto out;
11287 }
11289 tg3_netif_start(tp);
11290 tg3_full_unlock(tp);
11291 tg3_phy_start(tp);
11292 tg3_flag_clear(tp, RESET_TASK_PENDING);
11293 out:
11294 rtnl_unlock();
11295 }
11297 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11298 {
11299 irq_handler_t fn;
11300 unsigned long flags;
11301 char *name;
11302 struct tg3_napi *tnapi = &tp->napi[irq_num];
11304 if (tp->irq_cnt == 1)
11305 name = tp->dev->name;
11306 else {
11307 name = &tnapi->irq_lbl[0];
11308 if (tnapi->tx_buffers && tnapi->rx_rcb)
11309 snprintf(name, IFNAMSIZ,
11310 "%s-txrx-%d", tp->dev->name, irq_num);
11311 else if (tnapi->tx_buffers)
11312 snprintf(name, IFNAMSIZ,
11313 "%s-tx-%d", tp->dev->name, irq_num);
11314 else if (tnapi->rx_rcb)
11315 snprintf(name, IFNAMSIZ,
11316 "%s-rx-%d", tp->dev->name, irq_num);
11318 snprintf(name, IFNAMSIZ,
11319 "%s-%d", tp->dev->name, irq_num);
11320 name[IFNAMSIZ-1] = 0;
11321 }
11323 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11324 fn = tg3_msi;
11325 if (tg3_flag(tp, 1SHOT_MSI))
11326 fn = tg3_msi_1shot;
11327 flags = 0;
11328 } else {
11329 fn = tg3_interrupt;
11330 if (tg3_flag(tp, TAGGED_STATUS))
11331 fn = tg3_interrupt_tagged;
11332 flags = IRQF_SHARED;
11333 }
11335 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11336 }
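/* Example (illustrative): with three vectors on a device named "eth0",
 * the labels built above come out as "eth0-0" for the link-only vector
 * and e.g. "eth0-rx-1"/"eth0-rx-2" for RX-only ring vectors, or
 * "eth0-txrx-N" when a vector also owns a TX ring.
 */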
11338 static int tg3_test_interrupt(struct tg3 *tp)
11339 {
11340 struct tg3_napi *tnapi = &tp->napi[0];
11341 struct net_device *dev = tp->dev;
11342 int err, i, intr_ok = 0;
11343 u32 val;
11345 if (!netif_running(dev))
11346 return -ENODEV;
11348 tg3_disable_ints(tp);
11350 free_irq(tnapi->irq_vec, tnapi);
11352 /*
11353 * Turn off MSI one shot mode. Otherwise this test has no
11354 * observable way to know whether the interrupt was delivered.
11355 */
11356 if (tg3_flag(tp, 57765_PLUS)) {
11357 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11358 tw32(MSGINT_MODE, val);
11359 }
11361 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11362 IRQF_SHARED, dev->name, tnapi);
11363 if (err)
11364 return err;
11366 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11367 tg3_enable_ints(tp);
11369 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11370 tnapi->coal_now);
11372 for (i = 0; i < 5; i++) {
11373 u32 int_mbox, misc_host_ctrl;
11375 int_mbox = tr32_mailbox(tnapi->int_mbox);
11376 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11378 if ((int_mbox != 0) ||
11379 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11380 intr_ok = 1;
11381 break;
11382 }
11384 if (tg3_flag(tp, 57765_PLUS) &&
11385 tnapi->hw_status->status_tag != tnapi->last_tag)
11386 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11388 msleep(10);
11389 }
11391 tg3_disable_ints(tp);
11393 free_irq(tnapi->irq_vec, tnapi);
11395 err = tg3_request_irq(tp, 0);
11397 if (err)
11398 return err;
11400 if (intr_ok) {
11401 /* Reenable MSI one shot mode. */
11402 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11403 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11404 tw32(MSGINT_MODE, val);
11405 }
11406 return 0;
11407 }
11409 return -EIO;
11410 }
11412 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11413 * successfully restored.
11414 */
11415 static int tg3_test_msi(struct tg3 *tp)
11416 {
11417 int err;
11418 u16 pci_cmd;
11420 if (!tg3_flag(tp, USING_MSI))
11421 return 0;
11423 /* Turn off SERR reporting in case MSI terminates with Master
11424 * Abort.
11425 */
11426 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11427 pci_write_config_word(tp->pdev, PCI_COMMAND,
11428 pci_cmd & ~PCI_COMMAND_SERR);
11430 err = tg3_test_interrupt(tp);
11432 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11434 if (!err)
11435 return 0;
11437 /* other failures */
11438 if (err != -EIO)
11439 return err;
11441 /* MSI test failed, go back to INTx mode */
11442 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11443 "to INTx mode. Please report this failure to the PCI "
11444 "maintainer and include system chipset information\n");
11446 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11448 pci_disable_msi(tp->pdev);
11450 tg3_flag_clear(tp, USING_MSI);
11451 tp->napi[0].irq_vec = tp->pdev->irq;
11453 err = tg3_request_irq(tp, 0);
11454 if (err)
11455 return err;
11457 /* Need to reset the chip because the MSI cycle may have terminated
11458 * with Master Abort.
11459 */
11460 tg3_full_lock(tp, 1);
11462 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11463 err = tg3_init_hw(tp, true);
11465 tg3_full_unlock(tp);
11467 if (err)
11468 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11470 return err;
11471 }
11473 static int tg3_request_firmware(struct tg3 *tp)
11474 {
11475 const struct tg3_firmware_hdr *fw_hdr;
11477 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11478 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11483 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11485 /* Firmware blob starts with version numbers, followed by
11486 * start address and _full_ length including BSS sections
11487 * (which must be longer than the actual data, of course).
11488 */
11490 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11491 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11492 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11493 tp->fw_len, tp->fw_needed);
11494 release_firmware(tp->fw);
11495 tp->fw = NULL;
11496 return -EINVAL;
11497 }
11499 /* We no longer need firmware; we have it. */
11500 tp->fw_needed = NULL;
11501 return 0;
11502 }
11504 static u32 tg3_irq_count(struct tg3 *tp)
11505 {
11506 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11508 if (irq_cnt > 1) {
11509 /* We want as many rx rings enabled as there are cpus.
11510 * In multiqueue MSI-X mode, the first MSI-X vector
11511 * only deals with link interrupts, etc, so we add
11512 * one to the number of vectors we are requesting.
11513 */
11514 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11515 }
11517 return irq_cnt;
11518 }
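/* Worked example (illustrative): with rxq_cnt = 4 and txq_cnt = 1,
 * irq_cnt starts at max(4, 1) = 4; since that is greater than 1, one
 * extra vector is reserved for link and misc interrupts, so
 * min(4 + 1, tp->irq_max) MSI-X vectors are requested.
 */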
11520 static bool tg3_enable_msix(struct tg3 *tp)
11521 {
11522 int i, rc;
11523 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11525 tp->txq_cnt = tp->txq_req;
11526 tp->rxq_cnt = tp->rxq_req;
11527 if (!tp->rxq_cnt)
11528 tp->rxq_cnt = netif_get_num_default_rss_queues();
11529 if (tp->rxq_cnt > tp->rxq_max)
11530 tp->rxq_cnt = tp->rxq_max;
11532 /* Disable multiple TX rings by default. Simple round-robin hardware
11533 * scheduling of the TX rings can cause starvation of rings with
11534 * small packets when other rings have TSO or jumbo packets.
11535 */
11536 if (!tp->txq_req)
11537 tp->txq_cnt = 1;
11539 tp->irq_cnt = tg3_irq_count(tp);
11541 for (i = 0; i < tp->irq_max; i++) {
11542 msix_ent[i].entry = i;
11543 msix_ent[i].vector = 0;
11544 }
11546 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11547 if (rc < 0) {
11548 return false;
11549 } else if (rc < tp->irq_cnt) {
11550 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11551 tp->irq_cnt, rc);
11552 tp->irq_cnt = rc;
11553 tp->rxq_cnt = max(rc - 1, 1);
11554 if (tp->txq_cnt)
11555 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11556 }
11558 for (i = 0; i < tp->irq_max; i++)
11559 tp->napi[i].irq_vec = msix_ent[i].vector;
11561 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11562 pci_disable_msix(tp->pdev);
11563 return false;
11564 }
11566 if (tp->irq_cnt == 1)
11567 return true;
11569 tg3_flag_set(tp, ENABLE_RSS);
11571 if (tp->txq_cnt > 1)
11572 tg3_flag_set(tp, ENABLE_TSS);
11574 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11576 return true;
11577 }
11579 static void tg3_ints_init(struct tg3 *tp)
11580 {
11581 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11582 !tg3_flag(tp, TAGGED_STATUS)) {
11583 /* All MSI supporting chips should support tagged
11584 * status. Assert that this is the case.
11585 */
11586 netdev_warn(tp->dev,
11587 "MSI without TAGGED_STATUS? Not using MSI\n");
11591 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11592 tg3_flag_set(tp, USING_MSIX);
11593 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11594 tg3_flag_set(tp, USING_MSI);
11596 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11597 u32 msi_mode = tr32(MSGINT_MODE);
11598 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11599 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11600 if (!tg3_flag(tp, 1SHOT_MSI))
11601 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11602 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11603 }
11604 defcfg:
11605 if (!tg3_flag(tp, USING_MSIX)) {
11606 tp->irq_cnt = 1;
11607 tp->napi[0].irq_vec = tp->pdev->irq;
11608 }
11610 if (tp->irq_cnt == 1) {
11611 tp->txq_cnt = 1;
11612 tp->rxq_cnt = 1;
11613 netif_set_real_num_tx_queues(tp->dev, 1);
11614 netif_set_real_num_rx_queues(tp->dev, 1);
11615 }
11616 }
11618 static void tg3_ints_fini(struct tg3 *tp)
11619 {
11620 if (tg3_flag(tp, USING_MSIX))
11621 pci_disable_msix(tp->pdev);
11622 else if (tg3_flag(tp, USING_MSI))
11623 pci_disable_msi(tp->pdev);
11624 tg3_flag_clear(tp, USING_MSI);
11625 tg3_flag_clear(tp, USING_MSIX);
11626 tg3_flag_clear(tp, ENABLE_RSS);
11627 tg3_flag_clear(tp, ENABLE_TSS);
11628 }
11630 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11631 bool init)
11632 {
11633 struct net_device *dev = tp->dev;
11634 int i, err;
11636 /*
11637 * Setup interrupts first so we know how
11638 * many NAPI resources to allocate
11639 */
11640 tg3_ints_init(tp);
11642 tg3_rss_check_indir_tbl(tp);
11644 /* The placement of this call is tied
11645 * to the setup and use of Host TX descriptors.
11647 err = tg3_alloc_consistent(tp);
11649 goto out_ints_fini;
11653 tg3_napi_enable(tp);
11655 for (i = 0; i < tp->irq_cnt; i++) {
11656 err = tg3_request_irq(tp, i);
11658 for (i--; i >= 0; i--) {
11659 struct tg3_napi *tnapi = &tp->napi[i];
11661 free_irq(tnapi->irq_vec, tnapi);
11663 goto out_napi_fini;
11667 tg3_full_lock(tp, 0);
11670 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11672 err = tg3_init_hw(tp, reset_phy);
11674 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11675 tg3_free_rings(tp);
11678 tg3_full_unlock(tp);
11683 if (test_irq && tg3_flag(tp, USING_MSI)) {
11684 err = tg3_test_msi(tp);
11687 tg3_full_lock(tp, 0);
11688 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11689 tg3_free_rings(tp);
11690 tg3_full_unlock(tp);
11692 goto out_napi_fini;
11695 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11696 u32 val = tr32(PCIE_TRANSACTION_CFG);
11698 tw32(PCIE_TRANSACTION_CFG,
11699 val | PCIE_TRANS_CFG_1SHOT_MSI);
11705 tg3_hwmon_open(tp);
11707 tg3_full_lock(tp, 0);
11709 tg3_timer_start(tp);
11710 tg3_flag_set(tp, INIT_COMPLETE);
11711 tg3_enable_ints(tp);
11713 tg3_ptp_resume(tp);
11715 tg3_full_unlock(tp);
11717 netif_tx_start_all_queues(dev);
11720 * Reset the loopback feature if it was turned on while the device
11721 * was down, and make sure that it's installed properly now.
11723 if (dev->features & NETIF_F_LOOPBACK)
11724 tg3_set_loopback(dev, dev->features);
11729 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11730 struct tg3_napi *tnapi = &tp->napi[i];
11731 free_irq(tnapi->irq_vec, tnapi);
11735 tg3_napi_disable(tp);
11737 tg3_free_consistent(tp);
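/* Tear down everything tg3_start() set up, in roughly reverse order. */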
11745 static void tg3_stop(struct tg3 *tp)
11749 tg3_reset_task_cancel(tp);
11750 tg3_netif_stop(tp);
11752 tg3_timer_stop(tp);
11754 tg3_hwmon_close(tp);
11758 tg3_full_lock(tp, 1);
11760 tg3_disable_ints(tp);
11762 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11763 tg3_free_rings(tp);
11764 tg3_flag_clear(tp, INIT_COMPLETE);
11766 tg3_full_unlock(tp);
11768 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11769 struct tg3_napi *tnapi = &tp->napi[i];
11770 free_irq(tnapi->irq_vec, tnapi);
11777 tg3_free_consistent(tp);
11780 static int tg3_open(struct net_device *dev)
11782 struct tg3 *tp = netdev_priv(dev);
11785 if (tp->pcierr_recovery) {
11786 netdev_err(dev, "Failed to open device. PCI error recovery "
11791 if (tp->fw_needed) {
11792 err = tg3_request_firmware(tp);
11793 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11795 netdev_warn(tp->dev, "EEE capability disabled\n");
11796 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11797 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11798 netdev_warn(tp->dev, "EEE capability restored\n");
11799 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11801 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11805 netdev_warn(tp->dev, "TSO capability disabled\n");
11806 tg3_flag_clear(tp, TSO_CAPABLE);
11807 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11808 netdev_notice(tp->dev, "TSO capability restored\n");
11809 tg3_flag_set(tp, TSO_CAPABLE);
11813 tg3_carrier_off(tp);
11815 err = tg3_power_up(tp);
11819 tg3_full_lock(tp, 0);
11821 tg3_disable_ints(tp);
11822 tg3_flag_clear(tp, INIT_COMPLETE);
11824 tg3_full_unlock(tp);
11826 err = tg3_start(tp,
11827 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11830 tg3_frob_aux_power(tp, false);
11831 pci_set_power_state(tp->pdev, PCI_D3hot);
11837 static int tg3_close(struct net_device *dev)
11839 struct tg3 *tp = netdev_priv(dev);
11841 if (tp->pcierr_recovery) {
11842 netdev_err(dev, "Failed to close device. PCI error recovery "
11849 if (pci_device_is_present(tp->pdev)) {
11850 tg3_power_down_prepare(tp);
11852 tg3_carrier_off(tp);
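/* Hardware statistics counters are stored as two 32-bit halves; fold
 * them into a single 64-bit value.
 */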
11857 static inline u64 get_stat64(tg3_stat64_t *val)
11859 return ((u64)val->high << 32) | ((u64)val->low);
11862 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11864 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11866 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11867 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11868 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11871 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11872 tg3_writephy(tp, MII_TG3_TEST1,
11873 val | MII_TG3_TEST1_CRC_EN);
11874 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11878 tp->phy_crc_errors += val;
11880 return tp->phy_crc_errors;
11883 return get_stat64(&hw_stats->rx_fcs_errors);
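/* The MAC statistics block does not survive a chip reset, so each
 * ethtool statistic is reported as the snapshot saved at the last
 * reset plus the current hardware count.
 */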
11886 #define ESTAT_ADD(member) \
11887 estats->member = old_estats->member + \
11888 get_stat64(&hw_stats->member)
11890 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11892 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11893 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11895 ESTAT_ADD(rx_octets);
11896 ESTAT_ADD(rx_fragments);
11897 ESTAT_ADD(rx_ucast_packets);
11898 ESTAT_ADD(rx_mcast_packets);
11899 ESTAT_ADD(rx_bcast_packets);
11900 ESTAT_ADD(rx_fcs_errors);
11901 ESTAT_ADD(rx_align_errors);
11902 ESTAT_ADD(rx_xon_pause_rcvd);
11903 ESTAT_ADD(rx_xoff_pause_rcvd);
11904 ESTAT_ADD(rx_mac_ctrl_rcvd);
11905 ESTAT_ADD(rx_xoff_entered);
11906 ESTAT_ADD(rx_frame_too_long_errors);
11907 ESTAT_ADD(rx_jabbers);
11908 ESTAT_ADD(rx_undersize_packets);
11909 ESTAT_ADD(rx_in_length_errors);
11910 ESTAT_ADD(rx_out_length_errors);
11911 ESTAT_ADD(rx_64_or_less_octet_packets);
11912 ESTAT_ADD(rx_65_to_127_octet_packets);
11913 ESTAT_ADD(rx_128_to_255_octet_packets);
11914 ESTAT_ADD(rx_256_to_511_octet_packets);
11915 ESTAT_ADD(rx_512_to_1023_octet_packets);
11916 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11917 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11918 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11919 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11920 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11922 ESTAT_ADD(tx_octets);
11923 ESTAT_ADD(tx_collisions);
11924 ESTAT_ADD(tx_xon_sent);
11925 ESTAT_ADD(tx_xoff_sent);
11926 ESTAT_ADD(tx_flow_control);
11927 ESTAT_ADD(tx_mac_errors);
11928 ESTAT_ADD(tx_single_collisions);
11929 ESTAT_ADD(tx_mult_collisions);
11930 ESTAT_ADD(tx_deferred);
11931 ESTAT_ADD(tx_excessive_collisions);
11932 ESTAT_ADD(tx_late_collisions);
11933 ESTAT_ADD(tx_collide_2times);
11934 ESTAT_ADD(tx_collide_3times);
11935 ESTAT_ADD(tx_collide_4times);
11936 ESTAT_ADD(tx_collide_5times);
11937 ESTAT_ADD(tx_collide_6times);
11938 ESTAT_ADD(tx_collide_7times);
11939 ESTAT_ADD(tx_collide_8times);
11940 ESTAT_ADD(tx_collide_9times);
11941 ESTAT_ADD(tx_collide_10times);
11942 ESTAT_ADD(tx_collide_11times);
11943 ESTAT_ADD(tx_collide_12times);
11944 ESTAT_ADD(tx_collide_13times);
11945 ESTAT_ADD(tx_collide_14times);
11946 ESTAT_ADD(tx_collide_15times);
11947 ESTAT_ADD(tx_ucast_packets);
11948 ESTAT_ADD(tx_mcast_packets);
11949 ESTAT_ADD(tx_bcast_packets);
11950 ESTAT_ADD(tx_carrier_sense_errors);
11951 ESTAT_ADD(tx_discards);
11952 ESTAT_ADD(tx_errors);
11954 ESTAT_ADD(dma_writeq_full);
11955 ESTAT_ADD(dma_write_prioq_full);
11956 ESTAT_ADD(rxbds_empty);
11957 ESTAT_ADD(rx_discards);
11958 ESTAT_ADD(rx_errors);
11959 ESTAT_ADD(rx_threshold_hit);
11961 ESTAT_ADD(dma_readq_full);
11962 ESTAT_ADD(dma_read_prioq_full);
11963 ESTAT_ADD(tx_comp_queue_full);
11965 ESTAT_ADD(ring_set_send_prod_index);
11966 ESTAT_ADD(ring_status_update);
11967 ESTAT_ADD(nic_irqs);
11968 ESTAT_ADD(nic_avoided_irqs);
11969 ESTAT_ADD(nic_tx_threshold_hit);
11971 ESTAT_ADD(mbuf_lwm_thresh_hit);
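/* Map the chip's MAC statistics onto the generic rtnl_link_stats64
 * layout, again adding the saved pre-reset snapshots to the live
 * hardware counters.
 */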
11974 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11976 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11977 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11979 stats->rx_packets = old_stats->rx_packets +
11980 get_stat64(&hw_stats->rx_ucast_packets) +
11981 get_stat64(&hw_stats->rx_mcast_packets) +
11982 get_stat64(&hw_stats->rx_bcast_packets);
11984 stats->tx_packets = old_stats->tx_packets +
11985 get_stat64(&hw_stats->tx_ucast_packets) +
11986 get_stat64(&hw_stats->tx_mcast_packets) +
11987 get_stat64(&hw_stats->tx_bcast_packets);
11989 stats->rx_bytes = old_stats->rx_bytes +
11990 get_stat64(&hw_stats->rx_octets);
11991 stats->tx_bytes = old_stats->tx_bytes +
11992 get_stat64(&hw_stats->tx_octets);
11994 stats->rx_errors = old_stats->rx_errors +
11995 get_stat64(&hw_stats->rx_errors);
11996 stats->tx_errors = old_stats->tx_errors +
11997 get_stat64(&hw_stats->tx_errors) +
11998 get_stat64(&hw_stats->tx_mac_errors) +
11999 get_stat64(&hw_stats->tx_carrier_sense_errors) +
12000 get_stat64(&hw_stats->tx_discards);
12002 stats->multicast = old_stats->multicast +
12003 get_stat64(&hw_stats->rx_mcast_packets);
12004 stats->collisions = old_stats->collisions +
12005 get_stat64(&hw_stats->tx_collisions);
12007 stats->rx_length_errors = old_stats->rx_length_errors +
12008 get_stat64(&hw_stats->rx_frame_too_long_errors) +
12009 get_stat64(&hw_stats->rx_undersize_packets);
12011 stats->rx_frame_errors = old_stats->rx_frame_errors +
12012 get_stat64(&hw_stats->rx_align_errors);
12013 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
12014 get_stat64(&hw_stats->tx_discards);
12015 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
12016 get_stat64(&hw_stats->tx_carrier_sense_errors);
12018 stats->rx_crc_errors = old_stats->rx_crc_errors +
12019 tg3_calc_crc_errors(tp);
12021 stats->rx_missed_errors = old_stats->rx_missed_errors +
12022 get_stat64(&hw_stats->rx_discards);
12024 stats->rx_dropped = tp->rx_dropped;
12025 stats->tx_dropped = tp->tx_dropped;
12028 static int tg3_get_regs_len(struct net_device *dev)
12030 return TG3_REG_BLK_SIZE;
12033 static void tg3_get_regs(struct net_device *dev,
12034 struct ethtool_regs *regs, void *_p)
12036 struct tg3 *tp = netdev_priv(dev);
12040 memset(_p, 0, TG3_REG_BLK_SIZE);
12042 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12045 tg3_full_lock(tp, 0);
12047 tg3_dump_legacy_regs(tp, (u32 *)_p);
12049 tg3_full_unlock(tp);
12052 static int tg3_get_eeprom_len(struct net_device *dev)
12054 struct tg3 *tp = netdev_priv(dev);
12056 return tp->nvram_size;
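/* NVRAM is accessed in aligned 32-bit words, so an arbitrary
 * (offset, len) request is split into an unaligned head, a run of
 * whole words, and an unaligned tail.
 */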
12059 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12061 struct tg3 *tp = netdev_priv(dev);
12062 int ret, cpmu_restore = 0;
12064 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
12067 if (tg3_flag(tp, NO_NVRAM))
12070 offset = eeprom->offset;
12074 eeprom->magic = TG3_EEPROM_MAGIC;
12076 /* Override clock, link aware and link idle modes */
12077 if (tg3_flag(tp, CPMU_PRESENT)) {
12078 cpmu_val = tr32(TG3_CPMU_CTRL);
12079 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12080 CPMU_CTRL_LINK_IDLE_MODE)) {
12081 tw32(TG3_CPMU_CTRL, cpmu_val &
12082 ~(CPMU_CTRL_LINK_AWARE_MODE |
12083 CPMU_CTRL_LINK_IDLE_MODE));
12087 tg3_override_clk(tp);
12090 /* adjustments to start on required 4 byte boundary */
12091 b_offset = offset & 3;
12092 b_count = 4 - b_offset;
12093 if (b_count > len) {
12094 /* i.e. offset=1 len=2 */
12097 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12100 memcpy(data, ((char *)&val) + b_offset, b_count);
12103 eeprom->len += b_count;
12106 /* read bytes up to the last 4 byte boundary */
12107 pd = &data[eeprom->len];
12108 for (i = 0; i < (len - (len & 3)); i += 4) {
12109 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12116 memcpy(pd + i, &val, 4);
12117 if (need_resched()) {
12118 if (signal_pending(current)) {
12129 /* read last bytes not ending on 4 byte boundary */
12130 pd = &data[eeprom->len];
12132 b_offset = offset + len - b_count;
12133 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12136 memcpy(pd, &val, b_count);
12137 eeprom->len += b_count;
12142 /* Restore clock, link aware and link idle modes */
12143 tg3_restore_clk(tp);
12145 tw32(TG3_CPMU_CTRL, cpmu_val);
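/* Writes have the same 32-bit alignment constraint as reads: any
 * partial words at the start or end of the range are read back first
 * and merged with the caller's data before the block is written.
 */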
12150 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12152 struct tg3 *tp = netdev_priv(dev);
12154 u32 offset, len, b_offset, odd_len;
12156 __be32 start = 0, end;
12158 if (tg3_flag(tp, NO_NVRAM) ||
12159 eeprom->magic != TG3_EEPROM_MAGIC)
12162 offset = eeprom->offset;
12165 if ((b_offset = (offset & 3))) {
12166 /* adjustments to start on required 4 byte boundary */
12167 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12178 /* adjustments to end on required 4 byte boundary */
12180 len = (len + 3) & ~3;
12181 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12187 if (b_offset || odd_len) {
12188 buf = kmalloc(len, GFP_KERNEL);
12192 memcpy(buf, &start, 4);
12194 memcpy(buf+len-4, &end, 4);
12195 memcpy(buf + b_offset, data, eeprom->len);
12198 ret = tg3_nvram_write_block(tp, offset, len, buf);
12206 static int tg3_get_link_ksettings(struct net_device *dev,
12207 struct ethtool_link_ksettings *cmd)
12209 struct tg3 *tp = netdev_priv(dev);
12210 u32 supported, advertising;
12212 if (tg3_flag(tp, USE_PHYLIB)) {
12213 struct phy_device *phydev;
12214 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12216 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12217 phy_ethtool_ksettings_get(phydev, cmd);
12222 supported = (SUPPORTED_Autoneg);
12224 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12225 supported |= (SUPPORTED_1000baseT_Half |
12226 SUPPORTED_1000baseT_Full);
12228 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12229 supported |= (SUPPORTED_100baseT_Half |
12230 SUPPORTED_100baseT_Full |
12231 SUPPORTED_10baseT_Half |
12232 SUPPORTED_10baseT_Full |
12234 cmd->base.port = PORT_TP;
12236 supported |= SUPPORTED_FIBRE;
12237 cmd->base.port = PORT_FIBRE;
12239 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12242 advertising = tp->link_config.advertising;
12243 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12244 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12245 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12246 advertising |= ADVERTISED_Pause;
12248 advertising |= ADVERTISED_Pause |
12249 ADVERTISED_Asym_Pause;
12251 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12252 advertising |= ADVERTISED_Asym_Pause;
12255 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12258 if (netif_running(dev) && tp->link_up) {
12259 cmd->base.speed = tp->link_config.active_speed;
12260 cmd->base.duplex = tp->link_config.active_duplex;
12261 ethtool_convert_legacy_u32_to_link_mode(
12262 cmd->link_modes.lp_advertising,
12263 tp->link_config.rmt_adv);
12265 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12266 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12267 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12269 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12272 cmd->base.speed = SPEED_UNKNOWN;
12273 cmd->base.duplex = DUPLEX_UNKNOWN;
12274 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12276 cmd->base.phy_address = tp->phy_addr;
12277 cmd->base.autoneg = tp->link_config.autoneg;
12281 static int tg3_set_link_ksettings(struct net_device *dev,
12282 const struct ethtool_link_ksettings *cmd)
12284 struct tg3 *tp = netdev_priv(dev);
12285 u32 speed = cmd->base.speed;
12288 if (tg3_flag(tp, USE_PHYLIB)) {
12289 struct phy_device *phydev;
12290 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12292 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12293 return phy_ethtool_ksettings_set(phydev, cmd);
12296 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12297 cmd->base.autoneg != AUTONEG_DISABLE)
12300 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12301 cmd->base.duplex != DUPLEX_FULL &&
12302 cmd->base.duplex != DUPLEX_HALF)
12305 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12306 cmd->link_modes.advertising);
12308 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12309 u32 mask = ADVERTISED_Autoneg |
12311 ADVERTISED_Asym_Pause;
12313 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12314 mask |= ADVERTISED_1000baseT_Half |
12315 ADVERTISED_1000baseT_Full;
12317 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12318 mask |= ADVERTISED_100baseT_Half |
12319 ADVERTISED_100baseT_Full |
12320 ADVERTISED_10baseT_Half |
12321 ADVERTISED_10baseT_Full |
12324 mask |= ADVERTISED_FIBRE;
12326 if (advertising & ~mask)
12329 mask &= (ADVERTISED_1000baseT_Half |
12330 ADVERTISED_1000baseT_Full |
12331 ADVERTISED_100baseT_Half |
12332 ADVERTISED_100baseT_Full |
12333 ADVERTISED_10baseT_Half |
12334 ADVERTISED_10baseT_Full);
12336 advertising &= mask;
12338 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12339 if (speed != SPEED_1000)
12342 if (cmd->base.duplex != DUPLEX_FULL)
12345 if (speed != SPEED_100 &&
12351 tg3_full_lock(tp, 0);
12353 tp->link_config.autoneg = cmd->base.autoneg;
12354 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12355 tp->link_config.advertising = (advertising |
12356 ADVERTISED_Autoneg);
12357 tp->link_config.speed = SPEED_UNKNOWN;
12358 tp->link_config.duplex = DUPLEX_UNKNOWN;
12360 tp->link_config.advertising = 0;
12361 tp->link_config.speed = speed;
12362 tp->link_config.duplex = cmd->base.duplex;
12365 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12367 tg3_warn_mgmt_link_flap(tp);
12369 if (netif_running(dev))
12370 tg3_setup_phy(tp, true);
12372 tg3_full_unlock(tp);
12377 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12379 struct tg3 *tp = netdev_priv(dev);
12381 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12382 strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12383 strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12386 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12388 struct tg3 *tp = netdev_priv(dev);
12390 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12391 wol->supported = WAKE_MAGIC;
12393 wol->supported = 0;
12395 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12396 wol->wolopts = WAKE_MAGIC;
12397 memset(&wol->sopass, 0, sizeof(wol->sopass));
12400 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12402 struct tg3 *tp = netdev_priv(dev);
12403 struct device *dp = &tp->pdev->dev;
12405 if (wol->wolopts & ~WAKE_MAGIC)
12407 if ((wol->wolopts & WAKE_MAGIC) &&
12408 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12411 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12413 if (device_may_wakeup(dp))
12414 tg3_flag_set(tp, WOL_ENABLE);
12416 tg3_flag_clear(tp, WOL_ENABLE);
12421 static u32 tg3_get_msglevel(struct net_device *dev)
12423 struct tg3 *tp = netdev_priv(dev);
12424 return tp->msg_enable;
12427 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12429 struct tg3 *tp = netdev_priv(dev);
12430 tp->msg_enable = value;
12433 static int tg3_nway_reset(struct net_device *dev)
12435 struct tg3 *tp = netdev_priv(dev);
12438 if (!netif_running(dev))
12441 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12444 tg3_warn_mgmt_link_flap(tp);
12446 if (tg3_flag(tp, USE_PHYLIB)) {
12447 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12449 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12453 spin_lock_bh(&tp->lock);
12455 tg3_readphy(tp, MII_BMCR, &bmcr);
12456 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12457 ((bmcr & BMCR_ANENABLE) ||
12458 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12459 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12463 spin_unlock_bh(&tp->lock);
12469 static void tg3_get_ringparam(struct net_device *dev,
12470 struct ethtool_ringparam *ering,
12471 struct kernel_ethtool_ringparam *kernel_ering,
12472 struct netlink_ext_ack *extack)
12474 struct tg3 *tp = netdev_priv(dev);
12476 ering->rx_max_pending = tp->rx_std_ring_mask;
12477 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12478 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12480 ering->rx_jumbo_max_pending = 0;
12482 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12484 ering->rx_pending = tp->rx_pending;
12485 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12486 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12488 ering->rx_jumbo_pending = 0;
12490 ering->tx_pending = tp->napi[0].tx_pending;
12493 static int tg3_set_ringparam(struct net_device *dev,
12494 struct ethtool_ringparam *ering,
12495 struct kernel_ethtool_ringparam *kernel_ering,
12496 struct netlink_ext_ack *extack)
12498 struct tg3 *tp = netdev_priv(dev);
12499 int i, irq_sync = 0, err = 0;
12500 bool reset_phy = false;
12502 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12503 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12504 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12505 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12506 (tg3_flag(tp, TSO_BUG) &&
12507 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12510 if (netif_running(dev)) {
12512 tg3_netif_stop(tp);
12516 tg3_full_lock(tp, irq_sync);
12518 tp->rx_pending = ering->rx_pending;
12520 if (tg3_flag(tp, MAX_RXPEND_64) &&
12521 tp->rx_pending > 63)
12522 tp->rx_pending = 63;
12524 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12525 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12527 for (i = 0; i < tp->irq_max; i++)
12528 tp->napi[i].tx_pending = ering->tx_pending;
12530 if (netif_running(dev)) {
12531 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12532 /* Reset the PHY to avoid a PHY lockup */
12533 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12534 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12535 tg3_asic_rev(tp) == ASIC_REV_5720)
12538 err = tg3_restart_hw(tp, reset_phy);
12540 tg3_netif_start(tp);
12543 tg3_full_unlock(tp);
12545 if (irq_sync && !err)
12551 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12553 struct tg3 *tp = netdev_priv(dev);
12555 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12557 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12558 epause->rx_pause = 1;
12560 epause->rx_pause = 0;
12562 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12563 epause->tx_pause = 1;
12565 epause->tx_pause = 0;
12568 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12570 struct tg3 *tp = netdev_priv(dev);
12572 bool reset_phy = false;
12574 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12575 tg3_warn_mgmt_link_flap(tp);
12577 if (tg3_flag(tp, USE_PHYLIB)) {
12578 struct phy_device *phydev;
12580 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12582 if (!phy_validate_pause(phydev, epause))
12585 tp->link_config.flowctrl = 0;
12586 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12587 if (epause->rx_pause) {
12588 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12590 if (epause->tx_pause) {
12591 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12593 } else if (epause->tx_pause) {
12594 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12597 if (epause->autoneg)
12598 tg3_flag_set(tp, PAUSE_AUTONEG);
12600 tg3_flag_clear(tp, PAUSE_AUTONEG);
12602 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12603 if (phydev->autoneg) {
12604 /* phy_set_asym_pause() will
12605 * renegotiate the link to inform our
12606 * link partner of our flow control
12607 * settings, even if the flow control
12608 * is forced. Let tg3_adjust_link()
12609 * do the final flow control setup.
12614 if (!epause->autoneg)
12615 tg3_setup_flow_control(tp, 0, 0);
12620 if (netif_running(dev)) {
12621 tg3_netif_stop(tp);
12625 tg3_full_lock(tp, irq_sync);
12627 if (epause->autoneg)
12628 tg3_flag_set(tp, PAUSE_AUTONEG);
12630 tg3_flag_clear(tp, PAUSE_AUTONEG);
12631 if (epause->rx_pause)
12632 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12634 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12635 if (epause->tx_pause)
12636 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12638 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12640 if (netif_running(dev)) {
12641 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12642 /* Reset the PHY to avoid a PHY lockup */
12643 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12644 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12645 tg3_asic_rev(tp) == ASIC_REV_5720)
12648 err = tg3_restart_hw(tp, reset_phy);
12650 tg3_netif_start(tp);
12653 tg3_full_unlock(tp);
12656 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12661 static int tg3_get_sset_count(struct net_device *dev, int sset)
12665 return TG3_NUM_TEST;
12667 return TG3_NUM_STATS;
12669 return -EOPNOTSUPP;
12673 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12674 u32 *rules __always_unused)
12676 struct tg3 *tp = netdev_priv(dev);
12678 if (!tg3_flag(tp, SUPPORT_MSIX))
12679 return -EOPNOTSUPP;
12681 switch (info->cmd) {
12682 case ETHTOOL_GRXRINGS:
12683 if (netif_running(tp->dev))
12684 info->data = tp->rxq_cnt;
12686 info->data = num_online_cpus();
12687 if (info->data > TG3_RSS_MAX_NUM_QS)
12688 info->data = TG3_RSS_MAX_NUM_QS;
12694 return -EOPNOTSUPP;
12698 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12701 struct tg3 *tp = netdev_priv(dev);
12703 if (tg3_flag(tp, SUPPORT_MSIX))
12704 size = TG3_RSS_INDIR_TBL_SIZE;
12709 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12711 struct tg3 *tp = netdev_priv(dev);
12715 *hfunc = ETH_RSS_HASH_TOP;
12719 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12720 indir[i] = tp->rss_ind_tbl[i];
12725 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12728 struct tg3 *tp = netdev_priv(dev);
12731 /* We require at least one supported parameter to be changed and no
12732 * change in any of the unsupported parameters
12735 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12736 return -EOPNOTSUPP;
12741 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12742 tp->rss_ind_tbl[i] = indir[i];
12744 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12747 /* It is legal to write the indirection
12748 * table while the device is running.
12750 tg3_full_lock(tp, 0);
12751 tg3_rss_write_indir_tbl(tp);
12752 tg3_full_unlock(tp);
12757 static void tg3_get_channels(struct net_device *dev,
12758 struct ethtool_channels *channel)
12760 struct tg3 *tp = netdev_priv(dev);
12761 u32 deflt_qs = netif_get_num_default_rss_queues();
12763 channel->max_rx = tp->rxq_max;
12764 channel->max_tx = tp->txq_max;
12766 if (netif_running(dev)) {
12767 channel->rx_count = tp->rxq_cnt;
12768 channel->tx_count = tp->txq_cnt;
12771 channel->rx_count = tp->rxq_req;
12773 channel->rx_count = min(deflt_qs, tp->rxq_max);
12776 channel->tx_count = tp->txq_req;
12778 channel->tx_count = min(deflt_qs, tp->txq_max);
12782 static int tg3_set_channels(struct net_device *dev,
12783 struct ethtool_channels *channel)
12785 struct tg3 *tp = netdev_priv(dev);
12787 if (!tg3_flag(tp, SUPPORT_MSIX))
12788 return -EOPNOTSUPP;
12790 if (channel->rx_count > tp->rxq_max ||
12791 channel->tx_count > tp->txq_max)
12794 tp->rxq_req = channel->rx_count;
12795 tp->txq_req = channel->tx_count;
12797 if (!netif_running(dev))
12802 tg3_carrier_off(tp);
12804 tg3_start(tp, true, false, false);
12809 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12811 switch (stringset) {
12813 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12816 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12819 WARN_ON(1); /* we need a WARN() */
12824 static int tg3_set_phys_id(struct net_device *dev,
12825 enum ethtool_phys_id_state state)
12827 struct tg3 *tp = netdev_priv(dev);
12830 case ETHTOOL_ID_ACTIVE:
12831 return 1; /* cycle on/off once per second */
12833 case ETHTOOL_ID_ON:
12834 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12835 LED_CTRL_1000MBPS_ON |
12836 LED_CTRL_100MBPS_ON |
12837 LED_CTRL_10MBPS_ON |
12838 LED_CTRL_TRAFFIC_OVERRIDE |
12839 LED_CTRL_TRAFFIC_BLINK |
12840 LED_CTRL_TRAFFIC_LED);
12843 case ETHTOOL_ID_OFF:
12844 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12845 LED_CTRL_TRAFFIC_OVERRIDE);
12848 case ETHTOOL_ID_INACTIVE:
12849 tw32(MAC_LED_CTRL, tp->led_ctrl);
12856 static void tg3_get_ethtool_stats(struct net_device *dev,
12857 struct ethtool_stats *estats, u64 *tmp_stats)
12859 struct tg3 *tp = netdev_priv(dev);
12862 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12864 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
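/* Locate the VPD block - either via an extended-VPD entry in the
 * NVRAM directory or at the default fixed offset - and read it out
 * preserving NVRAM byte order.  If NVRAM is absent or the read
 * fails, fall back to the PCI config-space VPD capability.
 */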
12867 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12871 u32 offset = 0, len = 0;
12874 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12877 if (magic == TG3_EEPROM_MAGIC) {
12878 for (offset = TG3_NVM_DIR_START;
12879 offset < TG3_NVM_DIR_END;
12880 offset += TG3_NVM_DIRENT_SIZE) {
12881 if (tg3_nvram_read(tp, offset, &val))
12884 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12885 TG3_NVM_DIRTYPE_EXTVPD)
12889 if (offset != TG3_NVM_DIR_END) {
12890 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12891 if (tg3_nvram_read(tp, offset + 4, &offset))
12894 offset = tg3_nvram_logical_addr(tp, offset);
12897 if (!offset || !len) {
12898 offset = TG3_NVM_VPD_OFF;
12899 len = TG3_NVM_VPD_LEN;
12902 buf = kmalloc(len, GFP_KERNEL);
12906 for (i = 0; i < len; i += 4) {
12907 /* The data is in little-endian format in NVRAM.
12908 * Use the big-endian read routines to preserve
12909 * the byte order as it exists in NVRAM.
12911 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12916 buf = pci_vpd_alloc(tp->pdev, vpdlen);
12928 #define NVRAM_TEST_SIZE 0x100
12929 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12930 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12931 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12932 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12933 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12934 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12935 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12936 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
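/* NVRAM self-test: identify the image format from its magic number,
 * read the image in, and verify the matching integrity data - a CRC
 * for legacy images, a byte checksum for selfboot format 1 images,
 * and per-byte parity bits for the hardware selfboot format.
 */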
12938 static int tg3_test_nvram(struct tg3 *tp)
12942 int i, j, k, err = 0, size;
12945 if (tg3_flag(tp, NO_NVRAM))
12948 if (tg3_nvram_read(tp, 0, &magic) != 0)
12951 if (magic == TG3_EEPROM_MAGIC)
12952 size = NVRAM_TEST_SIZE;
12953 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12954 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12955 TG3_EEPROM_SB_FORMAT_1) {
12956 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12957 case TG3_EEPROM_SB_REVISION_0:
12958 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12960 case TG3_EEPROM_SB_REVISION_2:
12961 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12963 case TG3_EEPROM_SB_REVISION_3:
12964 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12966 case TG3_EEPROM_SB_REVISION_4:
12967 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12969 case TG3_EEPROM_SB_REVISION_5:
12970 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12972 case TG3_EEPROM_SB_REVISION_6:
12973 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12980 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12981 size = NVRAM_SELFBOOT_HW_SIZE;
12985 buf = kmalloc(size, GFP_KERNEL);
12990 for (i = 0, j = 0; i < size; i += 4, j++) {
12991 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12998 /* Selfboot format */
12999 magic = be32_to_cpu(buf[0]);
13000 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
13001 TG3_EEPROM_MAGIC_FW) {
13002 u8 *buf8 = (u8 *) buf, csum8 = 0;
13004 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
13005 TG3_EEPROM_SB_REVISION_2) {
13006 /* For rev 2, the csum doesn't include the MBA. */
13007 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
13009 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
13012 for (i = 0; i < size; i++)
13025 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
13026 TG3_EEPROM_MAGIC_HW) {
13027 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
13028 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
13029 u8 *buf8 = (u8 *) buf;
13031 /* Separate the parity bits and the data bytes. */
13032 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
13033 if ((i == 0) || (i == 8)) {
13037 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
13038 parity[k++] = buf8[i] & msk;
13040 } else if (i == 16) {
13044 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
13045 parity[k++] = buf8[i] & msk;
13048 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
13049 parity[k++] = buf8[i] & msk;
13052 data[j++] = buf8[i];
13056 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13057 u8 hw8 = hweight8(data[i]);
13059 if ((hw8 & 0x1) && parity[i])
13061 else if (!(hw8 & 0x1) && !parity[i])
13070 /* Bootstrap checksum at offset 0x10 */
13071 csum = calc_crc((unsigned char *) buf, 0x10);
13072 if (csum != le32_to_cpu(buf[0x10/4]))
13075 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13076 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13077 if (csum != le32_to_cpu(buf[0xfc/4]))
13082 buf = tg3_vpd_readblock(tp, &len);
13086 err = pci_vpd_check_csum(buf, len);
13087 /* go on if no checksum found */
13095 #define TG3_SERDES_TIMEOUT_SEC 2
13096 #define TG3_COPPER_TIMEOUT_SEC 6
13098 static int tg3_test_link(struct tg3 *tp)
13102 if (!netif_running(tp->dev))
13105 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13106 max = TG3_SERDES_TIMEOUT_SEC;
13108 max = TG3_COPPER_TIMEOUT_SEC;
13110 for (i = 0; i < max; i++) {
13114 if (msleep_interruptible(1000))
13121 /* Only test the commonly used registers */
13122 static int tg3_test_registers(struct tg3 *tp)
13124 int i, is_5705, is_5750;
13125 u32 offset, read_mask, write_mask, val, save_val, read_val;
13129 #define TG3_FL_5705 0x1
13130 #define TG3_FL_NOT_5705 0x2
13131 #define TG3_FL_NOT_5788 0x4
13132 #define TG3_FL_NOT_5750 0x8
13136 /* MAC Control Registers */
13137 { MAC_MODE, TG3_FL_NOT_5705,
13138 0x00000000, 0x00ef6f8c },
13139 { MAC_MODE, TG3_FL_5705,
13140 0x00000000, 0x01ef6b8c },
13141 { MAC_STATUS, TG3_FL_NOT_5705,
13142 0x03800107, 0x00000000 },
13143 { MAC_STATUS, TG3_FL_5705,
13144 0x03800100, 0x00000000 },
13145 { MAC_ADDR_0_HIGH, 0x0000,
13146 0x00000000, 0x0000ffff },
13147 { MAC_ADDR_0_LOW, 0x0000,
13148 0x00000000, 0xffffffff },
13149 { MAC_RX_MTU_SIZE, 0x0000,
13150 0x00000000, 0x0000ffff },
13151 { MAC_TX_MODE, 0x0000,
13152 0x00000000, 0x00000070 },
13153 { MAC_TX_LENGTHS, 0x0000,
13154 0x00000000, 0x00003fff },
13155 { MAC_RX_MODE, TG3_FL_NOT_5705,
13156 0x00000000, 0x000007fc },
13157 { MAC_RX_MODE, TG3_FL_5705,
13158 0x00000000, 0x000007dc },
13159 { MAC_HASH_REG_0, 0x0000,
13160 0x00000000, 0xffffffff },
13161 { MAC_HASH_REG_1, 0x0000,
13162 0x00000000, 0xffffffff },
13163 { MAC_HASH_REG_2, 0x0000,
13164 0x00000000, 0xffffffff },
13165 { MAC_HASH_REG_3, 0x0000,
13166 0x00000000, 0xffffffff },
13168 /* Receive Data and Receive BD Initiator Control Registers. */
13169 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13170 0x00000000, 0xffffffff },
13171 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13172 0x00000000, 0xffffffff },
13173 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13174 0x00000000, 0x00000003 },
13175 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13176 0x00000000, 0xffffffff },
13177 { RCVDBDI_STD_BD+0, 0x0000,
13178 0x00000000, 0xffffffff },
13179 { RCVDBDI_STD_BD+4, 0x0000,
13180 0x00000000, 0xffffffff },
13181 { RCVDBDI_STD_BD+8, 0x0000,
13182 0x00000000, 0xffff0002 },
13183 { RCVDBDI_STD_BD+0xc, 0x0000,
13184 0x00000000, 0xffffffff },
13186 /* Receive BD Initiator Control Registers. */
13187 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13188 0x00000000, 0xffffffff },
13189 { RCVBDI_STD_THRESH, TG3_FL_5705,
13190 0x00000000, 0x000003ff },
13191 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13192 0x00000000, 0xffffffff },
13194 /* Host Coalescing Control Registers. */
13195 { HOSTCC_MODE, TG3_FL_NOT_5705,
13196 0x00000000, 0x00000004 },
13197 { HOSTCC_MODE, TG3_FL_5705,
13198 0x00000000, 0x000000f6 },
13199 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13200 0x00000000, 0xffffffff },
13201 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13202 0x00000000, 0x000003ff },
13203 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13204 0x00000000, 0xffffffff },
13205 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13206 0x00000000, 0x000003ff },
13207 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13208 0x00000000, 0xffffffff },
13209 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13210 0x00000000, 0x000000ff },
13211 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13212 0x00000000, 0xffffffff },
13213 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13214 0x00000000, 0x000000ff },
13215 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13216 0x00000000, 0xffffffff },
13217 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13218 0x00000000, 0xffffffff },
13219 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13220 0x00000000, 0xffffffff },
13221 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13222 0x00000000, 0x000000ff },
13223 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13224 0x00000000, 0xffffffff },
13225 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13226 0x00000000, 0x000000ff },
13227 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13228 0x00000000, 0xffffffff },
13229 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13230 0x00000000, 0xffffffff },
13231 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13232 0x00000000, 0xffffffff },
13233 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13234 0x00000000, 0xffffffff },
13235 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13236 0x00000000, 0xffffffff },
13237 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13238 0xffffffff, 0x00000000 },
13239 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13240 0xffffffff, 0x00000000 },
13242 /* Buffer Manager Control Registers. */
13243 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13244 0x00000000, 0x007fff80 },
13245 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13246 0x00000000, 0x007fffff },
13247 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13248 0x00000000, 0x0000003f },
13249 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13250 0x00000000, 0x000001ff },
13251 { BUFMGR_MB_HIGH_WATER, 0x0000,
13252 0x00000000, 0x000001ff },
13253 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13254 0xffffffff, 0x00000000 },
13255 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13256 0xffffffff, 0x00000000 },
13258 /* Mailbox Registers */
13259 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13260 0x00000000, 0x000001ff },
13261 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13262 0x00000000, 0x000001ff },
13263 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13264 0x00000000, 0x000007ff },
13265 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13266 0x00000000, 0x000001ff },
13268 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13271 is_5705 = is_5750 = 0;
13272 if (tg3_flag(tp, 5705_PLUS)) {
13274 if (tg3_flag(tp, 5750_PLUS))
13278 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13279 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13282 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13285 if (tg3_flag(tp, IS_5788) &&
13286 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13289 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13292 offset = (u32) reg_tbl[i].offset;
13293 read_mask = reg_tbl[i].read_mask;
13294 write_mask = reg_tbl[i].write_mask;
13296 /* Save the original register content */
13297 save_val = tr32(offset);
13299 /* Determine the read-only value. */
13300 read_val = save_val & read_mask;
13302 /* Write zero to the register, then make sure the read-only bits
13303 * are not changed and the read/write bits are all zeros.
13307 val = tr32(offset);
13309 /* Test the read-only and read/write bits. */
13310 if (((val & read_mask) != read_val) || (val & write_mask))
13313 /* Write ones to all the bits defined by RdMask and WrMask, then
13314 * make sure the read-only bits are not changed and the
13315 * read/write bits are all ones.
13317 tw32(offset, read_mask | write_mask);
13319 val = tr32(offset);
13321 /* Test the read-only bits. */
13322 if ((val & read_mask) != read_val)
13325 /* Test the read/write bits. */
13326 if ((val & write_mask) != write_mask)
13329 tw32(offset, save_val);
13335 if (netif_msg_hw(tp))
13336 netdev_err(tp->dev,
13337 "Register test failed at offset %x\n", offset);
13338 tw32(offset, save_val);
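/* Write each test pattern to every word of the region and read it
 * back; any mismatch fails the memory test.
 */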
13342 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13344 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13348 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13349 for (j = 0; j < len; j += 4) {
13352 tg3_write_mem(tp, offset + j, test_pattern[i]);
13353 tg3_read_mem(tp, offset + j, &val);
13354 if (val != test_pattern[i])
13361 static int tg3_test_memory(struct tg3 *tp)
13363 static struct mem_entry {
13366 } mem_tbl_570x[] = {
13367 { 0x00000000, 0x00b50},
13368 { 0x00002000, 0x1c000},
13369 { 0xffffffff, 0x00000}
13370 }, mem_tbl_5705[] = {
13371 { 0x00000100, 0x0000c},
13372 { 0x00000200, 0x00008},
13373 { 0x00004000, 0x00800},
13374 { 0x00006000, 0x01000},
13375 { 0x00008000, 0x02000},
13376 { 0x00010000, 0x0e000},
13377 { 0xffffffff, 0x00000}
13378 }, mem_tbl_5755[] = {
13379 { 0x00000200, 0x00008},
13380 { 0x00004000, 0x00800},
13381 { 0x00006000, 0x00800},
13382 { 0x00008000, 0x02000},
13383 { 0x00010000, 0x0c000},
13384 { 0xffffffff, 0x00000}
13385 }, mem_tbl_5906[] = {
13386 { 0x00000200, 0x00008},
13387 { 0x00004000, 0x00400},
13388 { 0x00006000, 0x00400},
13389 { 0x00008000, 0x01000},
13390 { 0x00010000, 0x01000},
13391 { 0xffffffff, 0x00000}
13392 }, mem_tbl_5717[] = {
13393 { 0x00000200, 0x00008},
13394 { 0x00010000, 0x0a000},
13395 { 0x00020000, 0x13c00},
13396 { 0xffffffff, 0x00000}
13397 }, mem_tbl_57765[] = {
13398 { 0x00000200, 0x00008},
13399 { 0x00004000, 0x00800},
13400 { 0x00006000, 0x09800},
13401 { 0x00010000, 0x0a000},
13402 { 0xffffffff, 0x00000}
13404 struct mem_entry *mem_tbl;
13408 if (tg3_flag(tp, 5717_PLUS))
13409 mem_tbl = mem_tbl_5717;
13410 else if (tg3_flag(tp, 57765_CLASS) ||
13411 tg3_asic_rev(tp) == ASIC_REV_5762)
13412 mem_tbl = mem_tbl_57765;
13413 else if (tg3_flag(tp, 5755_PLUS))
13414 mem_tbl = mem_tbl_5755;
13415 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13416 mem_tbl = mem_tbl_5906;
13417 else if (tg3_flag(tp, 5705_PLUS))
13418 mem_tbl = mem_tbl_5705;
13420 mem_tbl = mem_tbl_570x;
13422 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13423 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13431 #define TG3_TSO_MSS 500
13433 #define TG3_TSO_IP_HDR_LEN 20
13434 #define TG3_TSO_TCP_HDR_LEN 20
13435 #define TG3_TSO_TCP_OPT_LEN 12
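/* Canned packet header for the TSO loopback test: a 20-byte IPv4
 * header followed by a 20-byte TCP header and 12 bytes of TCP
 * options.  The IP total-length field is patched at run time.
 */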
13437 static const u8 tg3_tso_header[] = {
13439 0x45, 0x00, 0x00, 0x00,
13440 0x00, 0x00, 0x40, 0x00,
13441 0x40, 0x06, 0x00, 0x00,
13442 0x0a, 0x00, 0x00, 0x01,
13443 0x0a, 0x00, 0x00, 0x02,
13444 0x0d, 0x00, 0xe0, 0x00,
13445 0x00, 0x00, 0x01, 0x00,
13446 0x00, 0x00, 0x02, 0x00,
13447 0x80, 0x10, 0x10, 0x00,
13448 0x14, 0x09, 0x00, 0x00,
13449 0x01, 0x01, 0x08, 0x0a,
13450 0x11, 0x11, 0x11, 0x11,
13451 0x11, 0x11, 0x11, 0x11,
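/* Transmit a single test frame on the TX ring, kick the coalescing
 * engine, and poll for the frame to reappear on the RX return ring.
 * The payload is an incrementing byte pattern that is verified on
 * receive.
 */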
13454 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13456 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13457 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13459 struct sk_buff *skb;
13460 u8 *tx_data, *rx_data;
13462 int num_pkts, tx_len, rx_len, i, err;
13463 struct tg3_rx_buffer_desc *desc;
13464 struct tg3_napi *tnapi, *rnapi;
13465 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13467 tnapi = &tp->napi[0];
13468 rnapi = &tp->napi[0];
13469 if (tp->irq_cnt > 1) {
13470 if (tg3_flag(tp, ENABLE_RSS))
13471 rnapi = &tp->napi[1];
13472 if (tg3_flag(tp, ENABLE_TSS))
13473 tnapi = &tp->napi[1];
13475 coal_now = tnapi->coal_now | rnapi->coal_now;
13480 skb = netdev_alloc_skb(tp->dev, tx_len);
13484 tx_data = skb_put(skb, tx_len);
13485 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13486 memset(tx_data + ETH_ALEN, 0x0, 8);
13488 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13490 if (tso_loopback) {
13491 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13493 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13494 TG3_TSO_TCP_OPT_LEN;
13496 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13497 sizeof(tg3_tso_header));
13500 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13501 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13503 /* Set the total length field in the IP header */
13504 iph->tot_len = htons((u16)(mss + hdr_len));
13506 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13507 TXD_FLAG_CPU_POST_DMA);
13509 if (tg3_flag(tp, HW_TSO_1) ||
13510 tg3_flag(tp, HW_TSO_2) ||
13511 tg3_flag(tp, HW_TSO_3)) {
13513 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13514 th = (struct tcphdr *)&tx_data[val];
13517 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13519 if (tg3_flag(tp, HW_TSO_3)) {
13520 mss |= (hdr_len & 0xc) << 12;
13521 if (hdr_len & 0x10)
13522 base_flags |= 0x00000010;
13523 base_flags |= (hdr_len & 0x3e0) << 5;
13524 } else if (tg3_flag(tp, HW_TSO_2))
13525 mss |= hdr_len << 9;
13526 else if (tg3_flag(tp, HW_TSO_1) ||
13527 tg3_asic_rev(tp) == ASIC_REV_5705) {
13528 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13530 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13533 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13536 data_off = ETH_HLEN;
13538 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13539 tx_len > VLAN_ETH_FRAME_LEN)
13540 base_flags |= TXD_FLAG_JMB_PKT;
13543 for (i = data_off; i < tx_len; i++)
13544 tx_data[i] = (u8) (i & 0xff);
13546 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13547 if (dma_mapping_error(&tp->pdev->dev, map)) {
13548 dev_kfree_skb(skb);
13552 val = tnapi->tx_prod;
13553 tnapi->tx_buffers[val].skb = skb;
13554 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13556 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13561 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13563 budget = tg3_tx_avail(tnapi);
13564 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13565 base_flags | TXD_FLAG_END, mss, 0)) {
13566 tnapi->tx_buffers[val].skb = NULL;
13567 dev_kfree_skb(skb);
13573 /* Sync BD data before updating mailbox */
13576 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13577 tr32_mailbox(tnapi->prodmbox);
13581 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13582 for (i = 0; i < 35; i++) {
13583 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13588 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13589 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13590 if ((tx_idx == tnapi->tx_prod) &&
13591 (rx_idx == (rx_start_idx + num_pkts)))
13595 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13596 dev_kfree_skb(skb);
13598 if (tx_idx != tnapi->tx_prod)
13601 if (rx_idx != rx_start_idx + num_pkts)
13605 while (rx_idx != rx_start_idx) {
13606 desc = &rnapi->rx_rcb[rx_start_idx++];
13607 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13608 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13610 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13611 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13614 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13617 if (!tso_loopback) {
13618 if (rx_len != tx_len)
13621 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13622 if (opaque_key != RXD_OPAQUE_RING_STD)
13625 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13628 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13629 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13630 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13634 if (opaque_key == RXD_OPAQUE_RING_STD) {
13635 rx_data = tpr->rx_std_buffers[desc_idx].data;
13636 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13638 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13639 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13640 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13645 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13648 rx_data += TG3_RX_OFFSET(tp);
13649 for (i = data_off; i < rx_len; i++, val++) {
13650 if (*(rx_data + i) != (u8) (val & 0xff))
13657 /* tg3_free_rings will unmap and free the rx_data */
13662 #define TG3_STD_LOOPBACK_FAILED 1
13663 #define TG3_JMB_LOOPBACK_FAILED 2
13664 #define TG3_TSO_LOOPBACK_FAILED 4
13665 #define TG3_LOOPBACK_FAILED \
13666 (TG3_STD_LOOPBACK_FAILED | \
13667 TG3_JMB_LOOPBACK_FAILED | \
13668 TG3_TSO_LOOPBACK_FAILED)
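/* Run the standard, TSO and jumbo loopback variants at the MAC,
 * internal-PHY and (optionally) external-PHY levels, accumulating
 * per-level failure bits in data[].  EEE is masked off for the
 * duration of the test and restored afterwards.
 */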
13670 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13674 u32 jmb_pkt_sz = 9000;
13677 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13679 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13680 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13682 if (!netif_running(tp->dev)) {
13683 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13684 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13686 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13690 err = tg3_reset_hw(tp, true);
13692 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13693 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13695 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13699 if (tg3_flag(tp, ENABLE_RSS)) {
13702 /* Reroute all rx packets to the 1st queue */
13703 for (i = MAC_RSS_INDIR_TBL_0;
13704 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13708 /* HW errata - mac loopback fails in some cases on 5780.
13709 * Normal traffic and PHY loopback are not affected by the
13710 * errata. Also, the MAC loopback test is deprecated for
13711 * all newer ASIC revisions.
13713 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13714 !tg3_flag(tp, CPMU_PRESENT)) {
13715 tg3_mac_loopback(tp, true);
13717 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13718 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13720 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13721 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13722 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13724 tg3_mac_loopback(tp, false);
13727 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13728 !tg3_flag(tp, USE_PHYLIB)) {
13731 tg3_phy_lpbk_set(tp, 0, false);
13733 /* Wait for link */
13734 for (i = 0; i < 100; i++) {
13735 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13740 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13741 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13742 if (tg3_flag(tp, TSO_CAPABLE) &&
13743 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13744 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13745 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13746 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13747 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13750 tg3_phy_lpbk_set(tp, 0, true);
13752 /* All link indications report up, but the hardware
13753 * isn't really ready for about 20 msec. Double it to be sure.
13758 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13759 data[TG3_EXT_LOOPB_TEST] |=
13760 TG3_STD_LOOPBACK_FAILED;
13761 if (tg3_flag(tp, TSO_CAPABLE) &&
13762 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13763 data[TG3_EXT_LOOPB_TEST] |=
13764 TG3_TSO_LOOPBACK_FAILED;
13765 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13766 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13767 data[TG3_EXT_LOOPB_TEST] |=
13768 TG3_JMB_LOOPBACK_FAILED;
13771 /* Re-enable gphy autopowerdown. */
13772 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13773 tg3_phy_toggle_apd(tp, true);
13776 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13777 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13780 tp->phy_flags |= eee_cap;
13785 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13788 struct tg3 *tp = netdev_priv(dev);
13789 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13791 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13792 if (tg3_power_up(tp)) {
13793 etest->flags |= ETH_TEST_FL_FAILED;
13794 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13797 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13800 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13802 if (tg3_test_nvram(tp) != 0) {
13803 etest->flags |= ETH_TEST_FL_FAILED;
13804 data[TG3_NVRAM_TEST] = 1;
13806 if (!doextlpbk && tg3_test_link(tp)) {
13807 etest->flags |= ETH_TEST_FL_FAILED;
13808 data[TG3_LINK_TEST] = 1;
13810 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13811 int err, err2 = 0, irq_sync = 0;
13813 if (netif_running(dev)) {
13815 tg3_netif_stop(tp);
13819 tg3_full_lock(tp, irq_sync);
13820 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13821 err = tg3_nvram_lock(tp);
13822 tg3_halt_cpu(tp, RX_CPU_BASE);
13823 if (!tg3_flag(tp, 5705_PLUS))
13824 tg3_halt_cpu(tp, TX_CPU_BASE);
13826 tg3_nvram_unlock(tp);
13828 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13831 if (tg3_test_registers(tp) != 0) {
13832 etest->flags |= ETH_TEST_FL_FAILED;
13833 data[TG3_REGISTER_TEST] = 1;
13836 if (tg3_test_memory(tp) != 0) {
13837 etest->flags |= ETH_TEST_FL_FAILED;
13838 data[TG3_MEMORY_TEST] = 1;
13842 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13844 if (tg3_test_loopback(tp, data, doextlpbk))
13845 etest->flags |= ETH_TEST_FL_FAILED;
13847 tg3_full_unlock(tp);
13849 if (tg3_test_interrupt(tp) != 0) {
13850 etest->flags |= ETH_TEST_FL_FAILED;
13851 data[TG3_INTERRUPT_TEST] = 1;
13854 tg3_full_lock(tp, 0);
13856 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13857 if (netif_running(dev)) {
13858 tg3_flag_set(tp, INIT_COMPLETE);
13859 err2 = tg3_restart_hw(tp, true);
13861 tg3_netif_start(tp);
13864 tg3_full_unlock(tp);
13866 if (irq_sync && !err2)
13869 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13870 tg3_power_down_prepare(tp);
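/* SIOCSHWTSTAMP handler: translate the requested RX filter into the
 * chip's RX PTP control bits (tp->rxptpctl) and record whether TX
 * timestamping is enabled.
 */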
13874 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13876 struct tg3 *tp = netdev_priv(dev);
13877 struct hwtstamp_config stmpconf;
13879 if (!tg3_flag(tp, PTP_CAPABLE))
13880 return -EOPNOTSUPP;
13882 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13885 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13886 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13889 switch (stmpconf.rx_filter) {
13890 case HWTSTAMP_FILTER_NONE:
13893 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13894 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13895 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13897 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13898 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13899 TG3_RX_PTP_CTL_SYNC_EVNT;
13901 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13902 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13903 TG3_RX_PTP_CTL_DELAY_REQ;
13905 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13906 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13907 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13909 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13910 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13911 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13913 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13914 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13915 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13917 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13918 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13919 TG3_RX_PTP_CTL_SYNC_EVNT;
13921 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13922 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13923 TG3_RX_PTP_CTL_SYNC_EVNT;
13925 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13926 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13927 TG3_RX_PTP_CTL_SYNC_EVNT;
13929 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13930 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13931 TG3_RX_PTP_CTL_DELAY_REQ;
13933 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13934 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13935 TG3_RX_PTP_CTL_DELAY_REQ;
13937 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13938 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13939 TG3_RX_PTP_CTL_DELAY_REQ;
13945 if (netif_running(dev) && tp->rxptpctl)
13946 tw32(TG3_RX_PTP_CTL,
13947 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13949 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13950 tg3_flag_set(tp, TX_TSTAMP_EN);
13952 tg3_flag_clear(tp, TX_TSTAMP_EN);
13954 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13958 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13960 struct tg3 *tp = netdev_priv(dev);
13961 struct hwtstamp_config stmpconf;
13963 if (!tg3_flag(tp, PTP_CAPABLE))
13964 return -EOPNOTSUPP;
13966 stmpconf.flags = 0;
13967 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13968 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13970 switch (tp->rxptpctl) {
13972 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13974 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13975 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13977 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13978 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13980 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13981 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13983 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13984 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13986 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13987 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13989 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13990 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13992 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13993 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13995 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13996 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13998 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13999 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
14001 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14002 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
14004 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14005 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
14007 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14008 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
14015 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;
		fallthrough;
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;
		return err;
	}
	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;
	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_set(dev, ifr);
	case SIOCGHWTSTAMP:
		return tg3_hwtstamp_get(dev, ifr);
	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
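
/* A userspace counterpart for the SIOCGMIIPHY/SIOCGMIIREG cases above;
 * a non-authoritative sketch (the function name is invented, a bound
 * socket fd is assumed).  Casting ifr_data to mii_ioctl_data is the
 * standard mii-tool idiom that if_mii() mirrors on the kernel side.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int example_read_mii_reg(int fd, const char *ifname,
				int reg, unsigned short *out)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* fills mii->phy_id */
		return -1;

	mii->reg_num = reg;
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
		return -1;

	*out = mii->val_out;
	return 0;
}
#endif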
static int tg3_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
static int tg3_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct tg3 *tp = netdev_priv(dev);
14101 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14102 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14104 if (!tg3_flag(tp, 5705_PLUS)) {
14105 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14106 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14107 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}
14111 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14112 (!ec->rx_coalesce_usecs) ||
14113 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14114 (!ec->tx_coalesce_usecs) ||
14115 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14116 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14117 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14118 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14119 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14120 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14121 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;
14125 /* Only copy relevant parameters, ignore all others. */
14126 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14127 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14128 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14129 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14130 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14131 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14132 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14133 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14134 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14136 if (netif_running(dev)) {
14137 tg3_full_lock(tp, 0);
14138 __tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}

	return 0;
}
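
/* How the validation above is hit from userspace -- an illustrative
 * read-modify-write via the legacy SIOCETHTOOL ioctl (modern tooling
 * uses the ethtool netlink API instead).  The function name is made
 * up; fd is an assumed AF_INET socket.  Note rx_coalesce_usecs == 0
 * is rejected by the checks above.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int example_set_rx_coalesce(int fd, const char *ifname,
				   unsigned int usecs)
{
	struct ethtool_coalesce ec;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ec;

	memset(&ec, 0, sizeof(ec));
	ec.cmd = ETHTOOL_GCOALESCE;		/* fetch current values */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return -1;

	ec.cmd = ETHTOOL_SCOALESCE;
	ec.rx_coalesce_usecs = usecs;		/* 1..MAX_RXCOL_TICKS */
	return ioctl(fd, SIOCETHTOOL, &ifr);
}
#endif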
static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;
14148 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14149 netdev_warn(tp->dev, "Board does not support EEE!\n");
14150 return -EOPNOTSUPP;
14153 if (edata->advertised != tp->eee.advertised) {
		netdev_warn(tp->dev,
			    "Direct manipulation of EEE advertisement is not supported\n");
		return -EINVAL;
	}
14159 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximum supported Tx LPI timer is %#x\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
		return -EINVAL;
	}

	tp->eee = *edata;
14168 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14169 tg3_warn_mgmt_link_flap(tp);
14171 if (netif_running(tp->dev)) {
		tg3_full_lock(tp, 0);
		err = tg3_setup_eee(tp);
		if (!err)
			err = tg3_phy_reset(tp);
		tg3_full_unlock(tp);
	}

	return err;
}
static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev,
			    "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	*edata = tp->eee;
	return 0;
}
14195 static const struct ethtool_ops tg3_ethtool_ops = {
14196 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14197 ETHTOOL_COALESCE_MAX_FRAMES |
14198 ETHTOOL_COALESCE_USECS_IRQ |
14199 ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14200 ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14201 .get_drvinfo = tg3_get_drvinfo,
14202 .get_regs_len = tg3_get_regs_len,
14203 .get_regs = tg3_get_regs,
14204 .get_wol = tg3_get_wol,
14205 .set_wol = tg3_set_wol,
14206 .get_msglevel = tg3_get_msglevel,
14207 .set_msglevel = tg3_set_msglevel,
14208 .nway_reset = tg3_nway_reset,
14209 .get_link = ethtool_op_get_link,
14210 .get_eeprom_len = tg3_get_eeprom_len,
14211 .get_eeprom = tg3_get_eeprom,
14212 .set_eeprom = tg3_set_eeprom,
14213 .get_ringparam = tg3_get_ringparam,
14214 .set_ringparam = tg3_set_ringparam,
14215 .get_pauseparam = tg3_get_pauseparam,
14216 .set_pauseparam = tg3_set_pauseparam,
14217 .self_test = tg3_self_test,
14218 .get_strings = tg3_get_strings,
14219 .set_phys_id = tg3_set_phys_id,
14220 .get_ethtool_stats = tg3_get_ethtool_stats,
14221 .get_coalesce = tg3_get_coalesce,
14222 .set_coalesce = tg3_set_coalesce,
14223 .get_sset_count = tg3_get_sset_count,
14224 .get_rxnfc = tg3_get_rxnfc,
14225 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14226 .get_rxfh = tg3_get_rxfh,
14227 .set_rxfh = tg3_set_rxfh,
14228 .get_channels = tg3_get_channels,
14229 .set_channels = tg3_set_channels,
14230 .get_ts_info = tg3_get_ts_info,
14231 .get_eee = tg3_get_eee,
14232 .set_eee = tg3_set_eee,
14233 .get_link_ksettings = tg3_get_link_ksettings,
	.set_link_ksettings = tg3_set_link_ksettings,
};
static void tg3_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
		*stats = tp->net_stats_prev;
		spin_unlock_bh(&tp->lock);
		return;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);
}
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'ing.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);
	tg3_netif_stop(tp);
	tg3_set_mtu(dev, tp, new_mtu);
	tg3_full_lock(tp, 1);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);
	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
14332 static const struct net_device_ops tg3_netdev_ops = {
14333 .ndo_open = tg3_open,
14334 .ndo_stop = tg3_close,
14335 .ndo_start_xmit = tg3_start_xmit,
14336 .ndo_get_stats64 = tg3_get_stats64,
14337 .ndo_validate_addr = eth_validate_addr,
14338 .ndo_set_rx_mode = tg3_set_rx_mode,
14339 .ndo_set_mac_address = tg3_set_mac_addr,
14340 .ndo_eth_ioctl = tg3_ioctl,
14341 .ndo_tx_timeout = tg3_tx_timeout,
14342 .ndo_change_mtu = tg3_change_mtu,
14343 .ndo_fix_features = tg3_fix_features,
14344 .ndo_set_features = tg3_set_features,
14345 #ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = tg3_poll_controller,
#endif
};
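
/* For orientation: a condensed, non-authoritative sketch of how the
 * two ops tables above get wired to a freshly allocated net_device at
 * probe time.  The real tg3_init_one() does much more and unwinds on
 * error; example_probe_wiring is an invented name.
 */
#if 0
static int example_probe_wiring(struct pci_dev *pdev)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct tg3));

	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev->netdev_ops = &tg3_netdev_ops;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;

	return register_netdev(dev);
}
#endif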
static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/* Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;
	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;
		if (val == magic)
			break;
		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
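
/* The wrap-around probe above in isolation, as a sketch with an
 * injected read callback (all names invented).  A 4 KB part, say,
 * mirrors offset 0x1000 back onto offset 0, so the magic written at 0
 * is seen again and the loop exits with cursize == 0x1000.
 */
#if 0
typedef int (*example_read_fn)(void *ctx, u32 off, u32 *val);

static u32 example_size_by_wraparound(void *ctx, example_read_fn read,
				      u32 magic, u32 max_size)
{
	u32 cursize = 0x10, val;

	while (cursize < max_size) {
		if (read(ctx, cursize, &val) != 0)
			return 0;
		if (val == magic)	/* addressing wrapped to offset 0 */
			break;
		cursize <<= 1;
	}
	return cursize;
}
#endif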
static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing. We want to operate on the
			 * 16-bit value at offset 0xf2. The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses. This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU. The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
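
/* The final byteswap above on concrete values: the little-endian size
 * field in NVRAM arrives byte-reversed relative to the CPU after the
 * register-style read, so swab16() restores it.  An illustrative
 * helper with an invented name:
 */
#if 0
static u32 example_nvram_size_from_f0(u32 f0_word)
{
	/* e.g. swab16(0x0002) == 0x0200 -> 512 * 1024 bytes */
	return swab16((u16)(f0_word & 0x0000ffff)) * 1024;
}
#endif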
14417 static void tg3_get_nvram_info(struct tg3 *tp)
14421 nvcfg1 = tr32(NVRAM_CFG1);
14422 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14423 tg3_flag_set(tp, FLASH);
14425 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14426 tw32(NVRAM_CFG1, nvcfg1);
14429 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14430 tg3_flag(tp, 5780_CLASS)) {
14431 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14432 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14433 tp->nvram_jedecnum = JEDEC_ATMEL;
14434 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14435 tg3_flag_set(tp, NVRAM_BUFFERED);
14437 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14438 tp->nvram_jedecnum = JEDEC_ATMEL;
14439 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14441 case FLASH_VENDOR_ATMEL_EEPROM:
14442 tp->nvram_jedecnum = JEDEC_ATMEL;
14443 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14444 tg3_flag_set(tp, NVRAM_BUFFERED);
14446 case FLASH_VENDOR_ST:
14447 tp->nvram_jedecnum = JEDEC_ST;
14448 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14449 tg3_flag_set(tp, NVRAM_BUFFERED);
14451 case FLASH_VENDOR_SAIFUN:
14452 tp->nvram_jedecnum = JEDEC_SAIFUN;
14453 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14455 case FLASH_VENDOR_SST_SMALL:
14456 case FLASH_VENDOR_SST_LARGE:
14457 tp->nvram_jedecnum = JEDEC_SST;
14458 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14462 tp->nvram_jedecnum = JEDEC_ATMEL;
14463 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
14468 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14470 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14471 case FLASH_5752PAGE_SIZE_256:
14472 tp->nvram_pagesize = 256;
14474 case FLASH_5752PAGE_SIZE_512:
14475 tp->nvram_pagesize = 512;
14477 case FLASH_5752PAGE_SIZE_1K:
14478 tp->nvram_pagesize = 1024;
14480 case FLASH_5752PAGE_SIZE_2K:
14481 tp->nvram_pagesize = 2048;
14483 case FLASH_5752PAGE_SIZE_4K:
14484 tp->nvram_pagesize = 4096;
14486 case FLASH_5752PAGE_SIZE_264:
14487 tp->nvram_pagesize = 264;
14489 case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
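
/* An equivalent, table-driven form of the switch above -- purely
 * illustrative (the driver keeps the explicit switch); names with the
 * example_ prefix are invented.
 */
#if 0
static const struct {
	u32 cfg;
	u32 size;
} example_page_size_map[] = {
	{ FLASH_5752PAGE_SIZE_256,   256 },
	{ FLASH_5752PAGE_SIZE_512,   512 },
	{ FLASH_5752PAGE_SIZE_1K,   1024 },
	{ FLASH_5752PAGE_SIZE_2K,   2048 },
	{ FLASH_5752PAGE_SIZE_4K,   4096 },
	{ FLASH_5752PAGE_SIZE_264,   264 },
	{ FLASH_5752PAGE_SIZE_528,   528 },
};

static u32 example_lookup_pagesize(u32 nvmcfg1)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_page_size_map); i++)
		if ((nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) ==
		    example_page_size_map[i].cfg)
			return example_page_size_map[i].size;

	return 0;	/* unknown encoding */
}
#endif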
14495 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14499 nvcfg1 = tr32(NVRAM_CFG1);
14501 /* NVRAM protection for TPM */
14502 if (nvcfg1 & (1 << 27))
14503 tg3_flag_set(tp, PROTECTED_NVRAM);
14505 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14506 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14507 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14508 tp->nvram_jedecnum = JEDEC_ATMEL;
14509 tg3_flag_set(tp, NVRAM_BUFFERED);
14511 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14512 tp->nvram_jedecnum = JEDEC_ATMEL;
14513 tg3_flag_set(tp, NVRAM_BUFFERED);
14514 tg3_flag_set(tp, FLASH);
14516 case FLASH_5752VENDOR_ST_M45PE10:
14517 case FLASH_5752VENDOR_ST_M45PE20:
14518 case FLASH_5752VENDOR_ST_M45PE40:
14519 tp->nvram_jedecnum = JEDEC_ST;
14520 tg3_flag_set(tp, NVRAM_BUFFERED);
14521 tg3_flag_set(tp, FLASH);
	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
14536 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14538 u32 nvcfg1, protect = 0;
14540 nvcfg1 = tr32(NVRAM_CFG1);
14542 /* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
14550 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14551 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14552 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14553 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14554 tp->nvram_jedecnum = JEDEC_ATMEL;
14555 tg3_flag_set(tp, NVRAM_BUFFERED);
14556 tg3_flag_set(tp, FLASH);
14557 tp->nvram_pagesize = 264;
14558 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14559 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14560 tp->nvram_size = (protect ? 0x3e200 :
14561 TG3_NVRAM_SIZE_512KB);
14562 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14563 tp->nvram_size = (protect ? 0x1f200 :
14564 TG3_NVRAM_SIZE_256KB);
14566 tp->nvram_size = (protect ? 0x1f200 :
14567 TG3_NVRAM_SIZE_128KB);
14569 case FLASH_5752VENDOR_ST_M45PE10:
14570 case FLASH_5752VENDOR_ST_M45PE20:
14571 case FLASH_5752VENDOR_ST_M45PE40:
14572 tp->nvram_jedecnum = JEDEC_ST;
14573 tg3_flag_set(tp, NVRAM_BUFFERED);
14574 tg3_flag_set(tp, FLASH);
14575 tp->nvram_pagesize = 256;
14576 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14577 tp->nvram_size = (protect ?
14578 TG3_NVRAM_SIZE_64KB :
14579 TG3_NVRAM_SIZE_128KB);
14580 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14581 tp->nvram_size = (protect ?
14582 TG3_NVRAM_SIZE_64KB :
14583 TG3_NVRAM_SIZE_256KB);
14585 tp->nvram_size = (protect ?
14586 TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
14592 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14596 nvcfg1 = tr32(NVRAM_CFG1);
14598 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14599 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14600 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14601 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14602 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14603 tp->nvram_jedecnum = JEDEC_ATMEL;
14604 tg3_flag_set(tp, NVRAM_BUFFERED);
14605 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14607 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14608 tw32(NVRAM_CFG1, nvcfg1);
14610 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14611 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14612 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14613 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14614 tp->nvram_jedecnum = JEDEC_ATMEL;
14615 tg3_flag_set(tp, NVRAM_BUFFERED);
14616 tg3_flag_set(tp, FLASH);
14617 tp->nvram_pagesize = 264;
14619 case FLASH_5752VENDOR_ST_M45PE10:
14620 case FLASH_5752VENDOR_ST_M45PE20:
14621 case FLASH_5752VENDOR_ST_M45PE40:
14622 tp->nvram_jedecnum = JEDEC_ST;
14623 tg3_flag_set(tp, NVRAM_BUFFERED);
14624 tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
14630 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14632 u32 nvcfg1, protect = 0;
14634 nvcfg1 = tr32(NVRAM_CFG1);
	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
14644 case FLASH_5761VENDOR_ATMEL_ADB021D:
14645 case FLASH_5761VENDOR_ATMEL_ADB041D:
14646 case FLASH_5761VENDOR_ATMEL_ADB081D:
14647 case FLASH_5761VENDOR_ATMEL_ADB161D:
14648 case FLASH_5761VENDOR_ATMEL_MDB021D:
14649 case FLASH_5761VENDOR_ATMEL_MDB041D:
14650 case FLASH_5761VENDOR_ATMEL_MDB081D:
14651 case FLASH_5761VENDOR_ATMEL_MDB161D:
14652 tp->nvram_jedecnum = JEDEC_ATMEL;
14653 tg3_flag_set(tp, NVRAM_BUFFERED);
14654 tg3_flag_set(tp, FLASH);
14655 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14656 tp->nvram_pagesize = 256;
14658 case FLASH_5761VENDOR_ST_A_M45PE20:
14659 case FLASH_5761VENDOR_ST_A_M45PE40:
14660 case FLASH_5761VENDOR_ST_A_M45PE80:
14661 case FLASH_5761VENDOR_ST_A_M45PE16:
14662 case FLASH_5761VENDOR_ST_M_M45PE20:
14663 case FLASH_5761VENDOR_ST_M_M45PE40:
14664 case FLASH_5761VENDOR_ST_M_M45PE80:
14665 case FLASH_5761VENDOR_ST_M_M45PE16:
14666 tp->nvram_jedecnum = JEDEC_ST;
14667 tg3_flag_set(tp, NVRAM_BUFFERED);
14668 tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
14677 case FLASH_5761VENDOR_ATMEL_ADB161D:
14678 case FLASH_5761VENDOR_ATMEL_MDB161D:
14679 case FLASH_5761VENDOR_ST_A_M45PE16:
14680 case FLASH_5761VENDOR_ST_M_M45PE16:
14681 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14683 case FLASH_5761VENDOR_ATMEL_ADB081D:
14684 case FLASH_5761VENDOR_ATMEL_MDB081D:
14685 case FLASH_5761VENDOR_ST_A_M45PE80:
14686 case FLASH_5761VENDOR_ST_M_M45PE80:
14687 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14689 case FLASH_5761VENDOR_ATMEL_ADB041D:
14690 case FLASH_5761VENDOR_ATMEL_MDB041D:
14691 case FLASH_5761VENDOR_ST_A_M45PE40:
14692 case FLASH_5761VENDOR_ST_M_M45PE40:
14693 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14695 case FLASH_5761VENDOR_ATMEL_ADB021D:
14696 case FLASH_5761VENDOR_ATMEL_MDB021D:
14697 case FLASH_5761VENDOR_ST_A_M45PE20:
14698 case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
14712 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14716 nvcfg1 = tr32(NVRAM_CFG1);
14718 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14719 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14720 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14721 tp->nvram_jedecnum = JEDEC_ATMEL;
14722 tg3_flag_set(tp, NVRAM_BUFFERED);
14723 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14725 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14726 tw32(NVRAM_CFG1, nvcfg1);
14728 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14729 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14730 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14731 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14732 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14733 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14734 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14735 tp->nvram_jedecnum = JEDEC_ATMEL;
14736 tg3_flag_set(tp, NVRAM_BUFFERED);
14737 tg3_flag_set(tp, FLASH);
14739 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14740 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14741 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14742 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14743 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14745 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14746 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14747 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14749 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14750 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14751 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14755 case FLASH_5752VENDOR_ST_M45PE10:
14756 case FLASH_5752VENDOR_ST_M45PE20:
14757 case FLASH_5752VENDOR_ST_M45PE40:
14758 tp->nvram_jedecnum = JEDEC_ST;
14759 tg3_flag_set(tp, NVRAM_BUFFERED);
14760 tg3_flag_set(tp, FLASH);
14762 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14763 case FLASH_5752VENDOR_ST_M45PE10:
14764 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14766 case FLASH_5752VENDOR_ST_M45PE20:
14767 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14769 case FLASH_5752VENDOR_ST_M45PE40:
14770 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14775 tg3_flag_set(tp, NO_NVRAM);
14779 tg3_nvram_get_pagesize(tp, nvcfg1);
14780 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14785 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14789 nvcfg1 = tr32(NVRAM_CFG1);
14791 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14792 case FLASH_5717VENDOR_ATMEL_EEPROM:
14793 case FLASH_5717VENDOR_MICRO_EEPROM:
14794 tp->nvram_jedecnum = JEDEC_ATMEL;
14795 tg3_flag_set(tp, NVRAM_BUFFERED);
14796 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14798 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14799 tw32(NVRAM_CFG1, nvcfg1);
14801 case FLASH_5717VENDOR_ATMEL_MDB011D:
14802 case FLASH_5717VENDOR_ATMEL_ADB011B:
14803 case FLASH_5717VENDOR_ATMEL_ADB011D:
14804 case FLASH_5717VENDOR_ATMEL_MDB021D:
14805 case FLASH_5717VENDOR_ATMEL_ADB021B:
14806 case FLASH_5717VENDOR_ATMEL_ADB021D:
14807 case FLASH_5717VENDOR_ATMEL_45USPT:
14808 tp->nvram_jedecnum = JEDEC_ATMEL;
14809 tg3_flag_set(tp, NVRAM_BUFFERED);
14810 tg3_flag_set(tp, FLASH);
14812 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14813 case FLASH_5717VENDOR_ATMEL_MDB021D:
14814 /* Detect size with tg3_nvram_get_size() */
14816 case FLASH_5717VENDOR_ATMEL_ADB021B:
14817 case FLASH_5717VENDOR_ATMEL_ADB021D:
14818 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14821 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14825 case FLASH_5717VENDOR_ST_M_M25PE10:
14826 case FLASH_5717VENDOR_ST_A_M25PE10:
14827 case FLASH_5717VENDOR_ST_M_M45PE10:
14828 case FLASH_5717VENDOR_ST_A_M45PE10:
14829 case FLASH_5717VENDOR_ST_M_M25PE20:
14830 case FLASH_5717VENDOR_ST_A_M25PE20:
14831 case FLASH_5717VENDOR_ST_M_M45PE20:
14832 case FLASH_5717VENDOR_ST_A_M45PE20:
14833 case FLASH_5717VENDOR_ST_25USPT:
14834 case FLASH_5717VENDOR_ST_45USPT:
14835 tp->nvram_jedecnum = JEDEC_ST;
14836 tg3_flag_set(tp, NVRAM_BUFFERED);
14837 tg3_flag_set(tp, FLASH);
14839 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14840 case FLASH_5717VENDOR_ST_M_M25PE20:
14841 case FLASH_5717VENDOR_ST_M_M45PE20:
14842 /* Detect size with tg3_nvram_get_size() */
14844 case FLASH_5717VENDOR_ST_A_M25PE20:
14845 case FLASH_5717VENDOR_ST_A_M45PE20:
14846 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14849 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14854 tg3_flag_set(tp, NO_NVRAM);
14858 tg3_nvram_get_pagesize(tp, nvcfg1);
14859 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14863 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14865 u32 nvcfg1, nvmpinstrp, nv_status;
14867 nvcfg1 = tr32(NVRAM_CFG1);
14868 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14870 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14871 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}
14876 switch (nvmpinstrp) {
14877 case FLASH_5762_MX25L_100:
14878 case FLASH_5762_MX25L_200:
14879 case FLASH_5762_MX25L_400:
14880 case FLASH_5762_MX25L_800:
14881 case FLASH_5762_MX25L_160_320:
14882 tp->nvram_pagesize = 4096;
14883 tp->nvram_jedecnum = JEDEC_MACRONIX;
14884 tg3_flag_set(tp, NVRAM_BUFFERED);
14885 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14886 tg3_flag_set(tp, FLASH);
			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
			tp->nvram_size =
				(1 << (nv_status >> AUTOSENSE_DEVID &
						AUTOSENSE_DEVID_MASK)
					<< AUTOSENSE_SIZE_IN_MB);
			break;
14894 case FLASH_5762_EEPROM_HD:
14895 nvmpinstrp = FLASH_5720_EEPROM_HD;
14897 case FLASH_5762_EEPROM_LD:
14898 nvmpinstrp = FLASH_5720_EEPROM_LD;
14900 case FLASH_5720VENDOR_M_ST_M45PE20:
14901 /* This pinstrap supports multiple sizes, so force it
14902 * to read the actual size from location 0xf0.
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}
14909 switch (nvmpinstrp) {
14910 case FLASH_5720_EEPROM_HD:
14911 case FLASH_5720_EEPROM_LD:
14912 tp->nvram_jedecnum = JEDEC_ATMEL;
14913 tg3_flag_set(tp, NVRAM_BUFFERED);
14915 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14916 tw32(NVRAM_CFG1, nvcfg1);
14917 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14918 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14920 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14922 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14923 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14924 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14925 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14926 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14927 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14928 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14929 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14930 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14931 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14932 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14933 case FLASH_5720VENDOR_ATMEL_45USPT:
14934 tp->nvram_jedecnum = JEDEC_ATMEL;
14935 tg3_flag_set(tp, NVRAM_BUFFERED);
14936 tg3_flag_set(tp, FLASH);
14938 switch (nvmpinstrp) {
14939 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14940 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14941 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14942 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14944 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14945 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14946 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14947 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14949 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14950 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14951 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14954 if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
14959 case FLASH_5720VENDOR_M_ST_M25PE10:
14960 case FLASH_5720VENDOR_M_ST_M45PE10:
14961 case FLASH_5720VENDOR_A_ST_M25PE10:
14962 case FLASH_5720VENDOR_A_ST_M45PE10:
14963 case FLASH_5720VENDOR_M_ST_M25PE20:
14964 case FLASH_5720VENDOR_M_ST_M45PE20:
14965 case FLASH_5720VENDOR_A_ST_M25PE20:
14966 case FLASH_5720VENDOR_A_ST_M45PE20:
14967 case FLASH_5720VENDOR_M_ST_M25PE40:
14968 case FLASH_5720VENDOR_M_ST_M45PE40:
14969 case FLASH_5720VENDOR_A_ST_M25PE40:
14970 case FLASH_5720VENDOR_A_ST_M45PE40:
14971 case FLASH_5720VENDOR_M_ST_M25PE80:
14972 case FLASH_5720VENDOR_M_ST_M45PE80:
14973 case FLASH_5720VENDOR_A_ST_M25PE80:
14974 case FLASH_5720VENDOR_A_ST_M45PE80:
14975 case FLASH_5720VENDOR_ST_25USPT:
14976 case FLASH_5720VENDOR_ST_45USPT:
14977 tp->nvram_jedecnum = JEDEC_ST;
14978 tg3_flag_set(tp, NVRAM_BUFFERED);
14979 tg3_flag_set(tp, FLASH);
14981 switch (nvmpinstrp) {
14982 case FLASH_5720VENDOR_M_ST_M25PE20:
14983 case FLASH_5720VENDOR_M_ST_M45PE20:
14984 case FLASH_5720VENDOR_A_ST_M25PE20:
14985 case FLASH_5720VENDOR_A_ST_M45PE20:
14986 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14988 case FLASH_5720VENDOR_M_ST_M25PE40:
14989 case FLASH_5720VENDOR_M_ST_M45PE40:
14990 case FLASH_5720VENDOR_A_ST_M25PE40:
14991 case FLASH_5720VENDOR_A_ST_M45PE40:
14992 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14994 case FLASH_5720VENDOR_M_ST_M25PE80:
14995 case FLASH_5720VENDOR_M_ST_M45PE80:
14996 case FLASH_5720VENDOR_A_ST_M25PE80:
14997 case FLASH_5720VENDOR_A_ST_M45PE80:
14998 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
15001 if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
15012 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
15013 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;
15021 if (val != TG3_EEPROM_MAGIC &&
15022 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
15027 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
15028 static void tg3_nvram_init(struct tg3 *tp)
15030 if (tg3_flag(tp, IS_SSB_CORE)) {
15031 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
15032 tg3_flag_clear(tp, NVRAM);
15033 tg3_flag_clear(tp, NVRAM_BUFFERED);
15034 tg3_flag_set(tp, NO_NVRAM);
15038 tw32_f(GRC_EEPROM_ADDR,
15039 (EEPROM_ADDR_FSM_RESET |
15040 (EEPROM_DEFAULT_CLOCK_PERIOD <<
15041 EEPROM_ADDR_CLKPERD_SHIFT)));
15045 /* Enable seeprom accesses. */
15046 tw32_f(GRC_LOCAL_CTRL,
15047 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15050 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15051 tg3_asic_rev(tp) != ASIC_REV_5701) {
15052 tg3_flag_set(tp, NVRAM);
15054 if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);
15062 tp->nvram_size = 0;
15064 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15065 tg3_get_5752_nvram_info(tp);
15066 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15067 tg3_get_5755_nvram_info(tp);
15068 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15069 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15070 tg3_asic_rev(tp) == ASIC_REV_5785)
15071 tg3_get_5787_nvram_info(tp);
15072 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15073 tg3_get_5761_nvram_info(tp);
15074 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15075 tg3_get_5906_nvram_info(tp);
15076 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15077 tg3_flag(tp, 57765_CLASS))
15078 tg3_get_57780_nvram_info(tp);
15079 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15080 tg3_asic_rev(tp) == ASIC_REV_5719)
15081 tg3_get_5717_nvram_info(tp);
15082 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15083 tg3_asic_rev(tp) == ASIC_REV_5762)
15084 tg3_get_5720_nvram_info(tp);
15086 tg3_get_nvram_info(tp);
15088 if (tp->nvram_size == 0)
15089 tg3_get_nvram_size(tp);
		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
15102 struct subsys_tbl_ent {
15103 u16 subsys_vendor, subsys_devid;
15107 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15108 /* Broadcom boards. */
15109 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15110 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15111 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15112 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15113 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15114 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15115 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15116 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15117 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15118 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15119 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15120 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15121 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15122 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15123 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15124 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15125 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15126 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15127 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15128 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15129 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15130 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15133 { TG3PCI_SUBVENDOR_ID_3COM,
15134 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15135 { TG3PCI_SUBVENDOR_ID_3COM,
15136 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15137 { TG3PCI_SUBVENDOR_ID_3COM,
15138 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15139 { TG3PCI_SUBVENDOR_ID_3COM,
15140 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15141 { TG3PCI_SUBVENDOR_ID_3COM,
15142 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15145 { TG3PCI_SUBVENDOR_ID_DELL,
15146 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15147 { TG3PCI_SUBVENDOR_ID_DELL,
15148 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15149 { TG3PCI_SUBVENDOR_ID_DELL,
15150 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15151 { TG3PCI_SUBVENDOR_ID_DELL,
15152 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15154 /* Compaq boards. */
15155 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15156 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15157 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15158 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15159 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15160 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15161 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15162 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15163 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15164 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15167 { TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}

	return NULL;
}
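
/* Illustrative use of the table above: when the EEPROM carries no PHY
 * ID, tg3_phy_probe() falls back to identifying the board by its PCI
 * subsystem IDs.  A hedged sketch with an invented name:
 */
#if 0
static u32 example_phy_id_from_subsys(struct tg3 *tp)
{
	struct subsys_tbl_ent *p = tg3_lookup_by_subsys(tp);

	return p ? p->phy_id : TG3_PHY_ID_INVALID;
}
#endif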
15185 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15189 tp->phy_id = TG3_PHY_ID_INVALID;
15190 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15192 /* Assume an onboard device and WOL capable by default. */
15193 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15194 tg3_flag_set(tp, WOL_CAP);
15196 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15197 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15198 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15199 tg3_flag_set(tp, IS_NIC);
15201 val = tr32(VCPU_CFGSHDW);
15202 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15203 tg3_flag_set(tp, ASPM_WORKAROUND);
15204 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15205 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15206 tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}
15212 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15213 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15214 u32 nic_cfg, led_cfg;
15215 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15216 u32 nic_phy_id, ver, eeprom_phy_id;
15217 int eeprom_phy_serdes = 0;
15219 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15220 tp->nic_sram_data_cfg = nic_cfg;
15222 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15223 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15224 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15225 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15226 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15227 (ver > 0) && (ver < 0x100))
15228 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15230 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15231 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15233 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15234 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15235 tg3_asic_rev(tp) == ASIC_REV_5720)
15236 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15238 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15239 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15240 eeprom_phy_serdes = 1;
15242 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15243 if (nic_phy_id != 0) {
15244 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15245 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15247 eeprom_phy_id = (id1 >> 16) << 10;
15248 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15249 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15253 tp->phy_id = eeprom_phy_id;
15254 if (eeprom_phy_serdes) {
15255 if (!tg3_flag(tp, 5705_PLUS))
15256 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15258 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15261 if (tg3_flag(tp, 5750_PLUS))
15262 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15263 SHASTA_EXT_LED_MODE_MASK);
15265 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15269 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15270 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15273 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15274 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15277 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15278 tp->led_ctrl = LED_CTRL_MODE_MAC;
15280 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15281 * read on some older 5700/5701 bootcode.
15283 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15284 tg3_asic_rev(tp) == ASIC_REV_5701)
15285 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15289 case SHASTA_EXT_LED_SHARED:
15290 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15291 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15292 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15293 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15294 LED_CTRL_MODE_PHY_2);
15296 if (tg3_flag(tp, 5717_PLUS) ||
15297 tg3_asic_rev(tp) == ASIC_REV_5762)
15298 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15299 LED_CTRL_BLINK_RATE_MASK;
15303 case SHASTA_EXT_LED_MAC:
15304 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15307 case SHASTA_EXT_LED_COMBO:
15308 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15309 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15310 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15311 LED_CTRL_MODE_PHY_2);
15316 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15317 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15318 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15319 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15321 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15322 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15324 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15325 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15326 if ((tp->pdev->subsystem_vendor ==
15327 PCI_VENDOR_ID_ARIMA) &&
15328 (tp->pdev->subsystem_device == 0x205a ||
15329 tp->pdev->subsystem_device == 0x2063))
15330 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15332 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15333 tg3_flag_set(tp, IS_NIC);
15336 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15337 tg3_flag_set(tp, ENABLE_ASF);
15338 if (tg3_flag(tp, 5750_PLUS))
15339 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15342 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15343 tg3_flag(tp, 5750_PLUS))
15344 tg3_flag_set(tp, ENABLE_APE);
15346 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15347 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15348 tg3_flag_clear(tp, WOL_CAP);
15350 if (tg3_flag(tp, WOL_CAP) &&
15351 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15352 tg3_flag_set(tp, WOL_ENABLE);
15353 device_set_wakeup_enable(&tp->pdev->dev, true);
15356 if (cfg2 & (1 << 17))
15357 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15359 /* serdes signal pre-emphasis in register 0x590 set by */
15360 /* bootcode if bit 18 is set */
15361 if (cfg2 & (1 << 18))
15362 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15364 if ((tg3_flag(tp, 57765_PLUS) ||
15365 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15366 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15367 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15368 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15370 if (tg3_flag(tp, PCI_EXPRESS)) {
15373 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15374 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15375 !tg3_flag(tp, 57765_PLUS) &&
15376 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15377 tg3_flag_set(tp, ASPM_WORKAROUND);
15378 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15379 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15380 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15381 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15384 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15385 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15386 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15387 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15388 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15389 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15391 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15392 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}

done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
15402 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15405 u32 val2, off = offset * 8;
15407 err = tg3_nvram_lock(tp);
15411 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15412 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15413 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15414 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15417 for (i = 0; i < 100; i++) {
15418 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15419 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15420 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
15435 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15440 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15441 tw32(OTP_CTRL, cmd);
15443 /* Wait for up to 1 ms for command to execute. */
15444 for (i = 0; i < 100; i++) {
15445 val = tr32(OTP_STATUS);
15446 if (val & OTP_STATUS_CMD_DONE)
	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
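
/* The polling loop above, restated with the generic
 * read_poll_timeout_atomic() helper from <linux/iopoll.h>; a sketch
 * only (the driver keeps its open-coded loop), with an invented
 * function name and assumed 10us/1ms poll parameters.
 */
#if 0
#include <linux/iopoll.h>

static int example_wait_otp_done(struct tg3 *tp)
{
	u32 val;

	/* poll OTP_STATUS every 10us, give up after 1000us */
	return read_poll_timeout_atomic(tr32, val,
					val & OTP_STATUS_CMD_DONE,
					10, 1000, false, OTP_STATUS);
}
#endif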
15454 /* Read the gphy configuration from the OTP region of the chip. The gphy
15455 * configuration is a 32-bit value that straddles the alignment boundary.
15456 * We do two 32-bit reads and then shift and merge the results.
15458 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15460 u32 bhalf_otp, thalf_otp;
15462 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15464 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15467 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15469 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15472 thalf_otp = tr32(OTP_READ_DATA);
15474 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15476 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15479 bhalf_otp = tr32(OTP_READ_DATA);
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
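
/* The shift-and-merge above on concrete values, as a self-contained
 * sketch (invented name): with thalf == 0xAAAABBBB and
 * bhalf == 0xCCCCDDDD, the straddling gphy config word comes out as
 * 0xBBBBCCCC.
 */
#if 0
static u32 example_merge_otp_halves(u32 thalf, u32 bhalf)
{
	/* low half of the first word, high half of the second word */
	return ((thalf & 0x0000ffff) << 16) | (bhalf >> 16);
}
#endif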
15484 static void tg3_phy_init_link_config(struct tg3 *tp)
15486 u32 adv = ADVERTISED_Autoneg;
15488 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15489 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15490 adv |= ADVERTISED_1000baseT_Half;
15491 adv |= ADVERTISED_1000baseT_Full;
15494 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15495 adv |= ADVERTISED_100baseT_Half |
15496 ADVERTISED_100baseT_Full |
15497 ADVERTISED_10baseT_Half |
15498 ADVERTISED_10baseT_Full |
15501 adv |= ADVERTISED_FIBRE;
15503 tp->link_config.advertising = adv;
15504 tp->link_config.speed = SPEED_UNKNOWN;
15505 tp->link_config.duplex = DUPLEX_UNKNOWN;
15506 tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
	tp->link_config.rmt_adv = 0;
	tp->old_link = -1;
}
15513 static int tg3_phy_probe(struct tg3 *tp)
15515 u32 hw_phy_id_1, hw_phy_id_2;
15516 u32 hw_phy_id, hw_phy_id_masked;
15519 /* flow control autonegotiation is default behavior */
15520 tg3_flag_set(tp, PAUSE_AUTONEG);
15521 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15523 if (tg3_flag(tp, ENABLE_APE)) {
15524 switch (tp->pci_fn) {
15526 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15529 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15532 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15535 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15540 if (!tg3_flag(tp, ENABLE_ASF) &&
15541 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15542 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15543 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15544 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15546 if (tg3_flag(tp, USE_PHYLIB))
15547 return tg3_phy_init(tp);
15549 /* Reading the PHY ID register can conflict with ASF
15550 * firmware access to the PHY hardware.
15553 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15554 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15556 /* Now read the physical PHY_ID from the chip and verify
15557 * that it is sane. If it doesn't look good, we fall back
15558 * to either the hard-coded table based PHY_ID and failing
15559 * that the value found in the eeprom area.
15561 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15562 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15564 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15565 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15566 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15568 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15571 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15572 tp->phy_id = hw_phy_id;
15573 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15574 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15576 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15578 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15579 /* Do nothing, phy ID already set up in
15580 * tg3_get_eeprom_hw_cfg().
15583 struct subsys_tbl_ent *p;
15585 /* No eeprom signature? Try the hardcoded
15586 * subsys device table.
15588 p = tg3_lookup_by_subsys(tp);
15590 tp->phy_id = p->phy_id;
15591 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785, and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to an SSB
				 * core for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}
15608 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15609 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15610 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15611 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15612 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15613 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15614 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15615 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15616 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15617 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15619 tp->eee.supported = SUPPORTED_100baseT_Full |
15620 SUPPORTED_1000baseT_Full;
15621 tp->eee.advertised = ADVERTISED_100baseT_Full |
15622 ADVERTISED_1000baseT_Full;
15623 tp->eee.eee_enabled = 1;
15624 tp->eee.tx_lpi_enabled = 1;
15625 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15628 tg3_phy_init_link_config(tp);
15630 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15631 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15632 !tg3_flag(tp, ENABLE_APE) &&
15633 !tg3_flag(tp, ENABLE_ASF)) {
15636 tg3_readphy(tp, MII_BMSR, &bmsr);
15637 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15638 (bmsr & BMSR_LSTATUS))
15639 goto skip_phy_reset;
15641 err = tg3_phy_reset(tp);
15645 tg3_phy_set_wirespeed(tp);
15647 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15648 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15649 tp->link_config.flowctrl);
15651 tg3_writephy(tp, MII_BMCR,
15652 BMCR_ANENABLE | BMCR_ANRESTART);
skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
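
/* The 32-bit PHY ID packing used in tg3_phy_probe() above (and for
 * the EEPROM copy in tg3_get_eeprom_hw_cfg()), isolated as a sketch
 * with an invented name: PHYSID1 contributes the upper OUI bits,
 * PHYSID2 the lower OUI bits plus model and revision.
 */
#if 0
static u32 example_pack_phy_id(u32 physid1, u32 physid2)
{
	u32 id = (physid1 & 0xffff) << 10;	/* high OUI bits */

	id |= (physid2 & 0xfc00) << 16;		/* low OUI bits */
	id |= (physid2 & 0x03ff) << 0;		/* model and revision */

	return id;
}
#endif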
15668 static void tg3_read_vpd(struct tg3 *tp)
15671 unsigned int len, vpdlen;
15674 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15678 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15679 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
15683 if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15686 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15687 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
15691 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15692 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15695 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15696 PCI_VPD_RO_KEYWORD_PARTNO, &len);
15698 goto out_not_found;
15700 if (len > TG3_BPN_SIZE)
15701 goto out_not_found;
15703 memcpy(tp->board_part_number, &vpd_data[i], len);
15707 if (tp->board_part_number[0])
15711 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15712 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15713 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15714 strcpy(tp->board_part_number, "BCM5717");
15715 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15716 strcpy(tp->board_part_number, "BCM5718");
15719 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15720 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15721 strcpy(tp->board_part_number, "BCM57780");
15722 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15723 strcpy(tp->board_part_number, "BCM57760");
15724 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15725 strcpy(tp->board_part_number, "BCM57790");
15726 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15727 strcpy(tp->board_part_number, "BCM57788");
15730 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15731 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15732 strcpy(tp->board_part_number, "BCM57761");
15733 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15734 strcpy(tp->board_part_number, "BCM57765");
15735 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15736 strcpy(tp->board_part_number, "BCM57781");
15737 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15738 strcpy(tp->board_part_number, "BCM57785");
15739 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15740 strcpy(tp->board_part_number, "BCM57791");
15741 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15742 strcpy(tp->board_part_number, "BCM57795");
15745 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15746 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15747 strcpy(tp->board_part_number, "BCM57762");
15748 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15749 strcpy(tp->board_part_number, "BCM57766");
15750 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15751 strcpy(tp->board_part_number, "BCM57782");
15752 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15753 strcpy(tp->board_part_number, "BCM57786");
15756 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15757 strcpy(tp->board_part_number, "BCM95906");
15760 strcpy(tp->board_part_number, "none");
15764 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15768 if (tg3_nvram_read(tp, offset, &val) ||
15769 (val & 0xfc000000) != 0x0c000000 ||
15770 tg3_nvram_read(tp, offset + 4, &val) ||
15777 static void tg3_read_bc_ver(struct tg3 *tp)
15779 u32 val, offset, start, ver_offset;
15781 bool newver = false;
15783 if (tg3_nvram_read(tp, 0xc, &offset) ||
15784 tg3_nvram_read(tp, 0x4, &start))
15787 offset = tg3_nvram_logical_addr(tp, offset);
15789 if (tg3_nvram_read(tp, offset, &val))
15792 if ((val & 0xfc000000) == 0x0c000000) {
15793 if (tg3_nvram_read(tp, offset + 4, &val))
15800 dst_off = strlen(tp->fw_ver);
15803 if (TG3_VER_SIZE - dst_off < 16 ||
15804 tg3_nvram_read(tp, offset + 8, &ver_offset))
15807 offset = offset + ver_offset - start;
15808 for (i = 0; i < 16; i += 4) {
15810 if (tg3_nvram_read_be32(tp, offset + i, &v))
15813 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15818 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15821 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15822 TG3_NVM_BCVER_MAJSFT;
15823 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15824 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15825 "v%d.%02d", major, minor);
15829 static void tg3_read_hwsb_ver(struct tg3 *tp)
15831 u32 val, major, minor;
15833 /* Use native endian representation */
15834 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15837 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15838 TG3_NVM_HWSB_CFG1_MAJSFT;
15839 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15840 TG3_NVM_HWSB_CFG1_MINSFT;
15842 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15845 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15847 u32 offset, major, minor, build;
15849 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15851 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15854 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15855 case TG3_EEPROM_SB_REVISION_0:
15856 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15858 case TG3_EEPROM_SB_REVISION_2:
15859 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15861 case TG3_EEPROM_SB_REVISION_3:
15862 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15864 case TG3_EEPROM_SB_REVISION_4:
15865 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15867 case TG3_EEPROM_SB_REVISION_5:
15868 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15870 case TG3_EEPROM_SB_REVISION_6:
15871 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15877 if (tg3_nvram_read(tp, offset, &val))
15880 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15881 TG3_EEPROM_SB_EDH_BLD_SHFT;
15882 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15883 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15884 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
15900 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15902 u32 val, offset, start;
15905 for (offset = TG3_NVM_DIR_START;
15906 offset < TG3_NVM_DIR_END;
15907 offset += TG3_NVM_DIRENT_SIZE) {
15908 if (tg3_nvram_read(tp, offset, &val))
15911 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15915 if (offset == TG3_NVM_DIR_END)
15918 if (!tg3_flag(tp, 5705_PLUS))
15919 start = 0x08000000;
15920 else if (tg3_nvram_read(tp, offset - 4, &start))
15923 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15924 !tg3_fw_img_is_valid(tp, offset) ||
15925 tg3_nvram_read(tp, offset + 8, &val))
15928 offset += val - start;
15930 vlen = strlen(tp->fw_ver);
15932 tp->fw_ver[vlen++] = ',';
15933 tp->fw_ver[vlen++] = ' ';
15935 for (i = 0; i < 4; i++) {
15937 if (tg3_nvram_read_be32(tp, offset, &v))
15940 offset += sizeof(v);
		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
15952 static void tg3_probe_ncsi(struct tg3 *tp)
15956 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15957 if (apedata != APE_SEG_SIG_MAGIC)
15960 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15961 if (!(apedata & APE_FW_STATUS_READY))
15964 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15965 tg3_flag_set(tp, APE_HAS_NCSI);
15968 static void tg3_read_dash_ver(struct tg3 *tp)
15974 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15976 if (tg3_flag(tp, APE_HAS_NCSI))
15978 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15983 vlen = strlen(tp->fw_ver);
15985 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15987 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15988 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15989 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15990 (apedata & APE_FW_VERSION_BLDMSK));
15993 static void tg3_read_otp_ver(struct tg3 *tp)
15997 if (tg3_asic_rev(tp) != ASIC_REV_5762)
16000 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
16001 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
16002 TG3_OTP_MAGIC0_VALID(val)) {
16003 u64 val64 = (u64) val << 32 | val2;
		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}

		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
16018 static void tg3_read_fw_ver(struct tg3 *tp)
16021 bool vpd_vers = false;
16023 if (tp->fw_ver[0] != 0)
16026 if (tg3_flag(tp, NO_NVRAM)) {
16027 strcat(tp->fw_ver, "sb");
16028 tg3_read_otp_ver(tp);
16032 if (tg3_nvram_read(tp, 0, &val))
16035 if (val == TG3_EEPROM_MAGIC)
16036 tg3_read_bc_ver(tp);
16037 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16038 tg3_read_sb_ver(tp, val);
16039 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16040 tg3_read_hwsb_ver(tp);
16042 if (tg3_flag(tp, ENABLE_ASF)) {
16043 if (tg3_flag(tp, ENABLE_APE)) {
16044 tg3_probe_ncsi(tp);
16046 tg3_read_dash_ver(tp);
16047 } else if (!vpd_vers) {
16048 tg3_read_mgmtfw_ver(tp);
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
16055 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16057 if (tg3_flag(tp, LRG_PROD_RING_CAP))
16058 return TG3_RX_RET_MAX_SIZE_5717;
16059 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16060 return TG3_RX_RET_MAX_SIZE_5700;
16062 return TG3_RX_RET_MAX_SIZE_5705;
16065 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16066 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16067 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16068 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16072 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16074 struct pci_dev *peer;
16075 unsigned int func, devnr = tp->pdev->devfn & ~7;
16077 for (func = 0; func < 8; func++) {
16078 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16079 if (peer && peer != tp->pdev)
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/* We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
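
/* The devfn arithmetic used above, assuming the standard PCI_SLOT()/
 * PCI_FUNC() macros: devfn packs device and function as
 * (slot << 3) | func, so masking with ~7 selects function 0 of the
 * slot and OR-ing func back in walks all eight functions.  Sketch
 * only, invented name:
 */
#if 0
static void example_walk_slot(struct pci_dev *pdev)
{
	unsigned int devnr = pdev->devfn & ~7;	/* function 0 */
	unsigned int func;

	for (func = 0; func < 8; func++)
		pr_info("slot %u func %u -> devfn %u\n",
			PCI_SLOT(devnr | func), func, devnr | func);
}
#endif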
16100 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16102 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

16106 /* All devices that use the alternate
16107 * ASIC REV location have a CPMU.
16109 tg3_flag_set(tp, CPMU_PRESENT);
16111 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16112 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16113 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16114 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16115 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16116 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16117 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16118 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16119 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16120 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16121 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16122 reg = TG3PCI_GEN2_PRODID_ASICREV;
16123 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16124 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16125 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16126 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16127 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16128 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16129 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16130 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16131 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16132 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16133 reg = TG3PCI_GEN15_PRODID_ASICREV;
16135 reg = TG3PCI_PRODID_ASICREV;
16137 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16140 /* Wrong chip ID in 5752 A0. This code can be removed later
16141 * as A0 is not in production.
16143 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16144 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16146 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16147 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16149 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16150 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16151 tg3_asic_rev(tp) == ASIC_REV_5720)
16152 tg3_flag_set(tp, 5717_PLUS);
16154 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16155 tg3_asic_rev(tp) == ASIC_REV_57766)
16156 tg3_flag_set(tp, 57765_CLASS);
16158 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16159 tg3_asic_rev(tp) == ASIC_REV_5762)
16160 tg3_flag_set(tp, 57765_PLUS);
16162 /* Intentionally exclude ASIC_REV_5906 */
16163 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16164 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16165 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16166 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16167 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16168 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16169 tg3_flag(tp, 57765_PLUS))
16170 tg3_flag_set(tp, 5755_PLUS);
16172 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16173 tg3_asic_rev(tp) == ASIC_REV_5714)
16174 tg3_flag_set(tp, 5780_CLASS);
16176 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16177 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16178 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16179 tg3_flag(tp, 5755_PLUS) ||
16180 tg3_flag(tp, 5780_CLASS))
16181 tg3_flag_set(tp, 5750_PLUS);
16183 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16184 tg3_flag(tp, 5750_PLUS))
16185 tg3_flag_set(tp, 5705_PLUS);
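/* 10/100-only devices are flagged either by board ID straps (certain
 * 5703 boards), by a FET-style PHY, or by the 10_100_ONLY driver_data
 * bit of the PCI ID table entry that matched this device.
 */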
static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time, so it is never
	 * used.  This seems to suggest that the workaround is
	 * insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	/* Important! -- Make sure register accesses are byteswapped
	 * correctly.  Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);
	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers.  The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space.  Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range.  This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles.  However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}
	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = tp->pdev->msi_cap;
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);
	/* Determine TSO capabilities */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, FW_TSO);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tg3_flag(tp, FW_TSO)) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		tp->fw_needed = FIRMWARE_TG357766;
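	/* Summary of the TSO selection above: HW_TSO_1/2/3 mark successive
	 * generations of the hardware TSO engine, FW_TSO marks chips that
	 * need the TSO firmware patch (FIRMWARE_TG3TSO/TG3TSO5), and
	 * 5719 A0 gets no TSO at all due to a hardware bug.
	 */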
	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    tg3_asic_rev(tp) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
	if (tp->irq_max > 1) {
		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}
	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}
	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}
	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pdev->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pdev->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}
	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);
	}
	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	      tg3_asic_rev(tp) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
		   tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (tg3_asic_rev(tp) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}
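	/* tp->pci_fn computed above is the chip's own notion of its port
	 * number (from PCI-X status or CPMU status), which can differ from
	 * PCI_FUNC() on multi-function parts; tg3_get_device_address()
	 * later uses it to pick the right NVRAM MAC address offset.
	 */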
	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
		tp->ape_hb_interval =
			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
	}
	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);
	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
		    tg3_asic_rev(tp) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);
	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
	tw32(TG3PCI_REG_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	/* If the device has an NVRAM, no need to load patch firmware */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
	    !tg3_flag(tp, NO_NVRAM))
		tp->fw_needed = NULL;
	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;

	if (tg3_10_100_only_device(tp, ent))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
		tg3_flag_set(tp, POLL_CPMU_LINK);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return 0;
}
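/* MAC address discovery tries, in order: a platform/firmware-provided
 * address, the SSB core (for SoC-embedded devices), the SRAM mailbox
 * left by bootcode (tagged in the high word with 0x484b, ASCII "HK"),
 * NVRAM at a per-function offset, and finally the live MAC_ADDR_0
 * registers.
 */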
static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
{
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

	if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, addr);
		if (!err && is_valid_ether_addr(addr))
			return 0;
	}

	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		addr[0] = (hi >>  8) & 0xff;
		addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		addr[2] = (lo >> 24) & 0xff;
		addr[3] = (lo >> 16) & 0xff;
		addr[4] = (lo >>  8) & 0xff;
		addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(addr);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&addr[0], ((char *)&hi) + 2, 2);
			memcpy(&addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			addr[5] = lo & 0xff;
			addr[4] = (lo >> 8) & 0xff;
			addr[3] = (lo >> 16) & 0xff;
			addr[2] = (lo >> 24) & 0xff;
			addr[1] = hi & 0xff;
			addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(addr))
		return -EINVAL;
	return 0;
}
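/* Pick DMA read/write boundary bits for DMA_RW_CTRL.  The goal depends
 * on the host architecture: multi-cacheline bursts on PPC64/PARISC,
 * single-cacheline on SPARC64/ALPHA, no constraint elsewhere.  The
 * boundary only matters on 5700/5701 and PCIe parts; on everything
 * else the bits have no effect and the value is returned unchanged.
 */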
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			fallthrough;
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			fallthrough;
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			fallthrough;
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			fallthrough;
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			fallthrough;
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
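/* Run one host<->NIC DMA transfer by hand: build a single internal
 * buffer descriptor in NIC SRAM through the config-space memory
 * window, kick the read (host->device) or write (device->host) DMA
 * FTQ, and poll the completion FIFO until the descriptor address
 * comes back.
 */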
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
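/* tg3_test_dma() below first derives a chip- and bus-specific value
 * for DMA_RW_CTRL, then (on 5700/5701 only) performs a write/readback
 * test over an 8 KB buffer at the maximum write burst size to detect
 * the write-DMA corruption bug, falling back to a 16-byte write
 * boundary if corruption is seen.
 */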
#define TEST_BUFFER_SIZE	0x2000

static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};

static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
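/* Buffer manager watermarks come in three flavors: 57765-class,
 * 5705-class (with 5906 overrides), and the original 5700 defaults;
 * each also carries a jumbo-frame variant used when jumbo rings are
 * enabled.
 */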
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:				return "serdes";
	default:			return "unknown";
	}
}
static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
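/* Probe path: enable and map the PCI device, detect chip invariants
 * and quirks, size the DMA masks, read the MAC address, set up one
 * set of mailbox registers per interrupt vector, reset any state left
 * behind by UNDI/EFI drivers, run the DMA test, and only then register
 * the net device.
 */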
static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;
	u8 addr[ETH_ALEN] __aligned(2);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->irq_sync = 1;
	tp->pcierr_recovery = false;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev)) {
			tg3_flag_set(tp, USE_PHYLIB);
			tg3_flag_set(tp, ROBOSWITCH);
		}
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}
	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}
	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}
	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in __tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = dma_set_mask(&pdev->dev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = dma_set_coherent_mask(&pdev->dev,
						    persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}
	tg3_init_bufmgr_config(tp);

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features |= features;
	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 60 - 9000 or 1500, depending on hardware */
	dev->min_mtu = TG3_MIN_MTU;
	dev->max_mtu = TG3_MAX_MTU(tp);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp, addr);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}
	eth_hw_addr_set(dev, addr);
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		intmbx += 0x8;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
	/*
	 * Reset chip in case UNDI or EFI driver did not shut it down.
	 * The DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tg3_full_lock(tp, 0);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	tg3_init_coal(tp);
	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tg3_ptp_init(tp);
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}
17954 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17955 tp->board_part_number,
17956 tg3_chip_rev_id(tp),
17957 tg3_bus_string(tp, str),
17960 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17963 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17964 ethtype = "10/100Base-TX";
17965 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17966 ethtype = "1000Base-SX";
17968 ethtype = "10/100/1000Base-T";
17970 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17971 "(WireSpeed[%d], EEE[%d])\n",
17972 tg3_phy_string(tp), ethtype,
17973 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17974 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17977 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17978 (dev->features & NETIF_F_RXCSUM) != 0,
17979 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17980 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17981 tg3_flag(tp, ENABLE_ASF) != 0,
17982 tg3_flag(tp, TSO_CAPABLE) != 0);
17983 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17985 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17986 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}
static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}
static int tg3_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */
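/* Suspend halts the chip and drops INIT_COMPLETE so that a failed
 * power-down can restart the hardware; resume re-runs the INIT state
 * change and restarts the timer and queues.  The same restart
 * sequence is reused by the PCI error-recovery resume callback below.
 */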
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);

static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	tg3_reset_task_cancel(tp);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();

	pci_disable_device(pdev);
}
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	rtnl_lock();

	/* Could be second call or maybe we don't have netdev yet */
	if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
		goto done;

	/* We needn't recover from permanent error */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);