2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2016 Broadcom Corporation.
8 * Copyright (C) 2016-2017 Broadcom Limited.
9 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10 * refers to Broadcom Inc. and/or its subsidiaries.
13 * Derived from proprietary unpublished source code,
14 * Copyright (C) 2000-2016 Broadcom Corporation.
15 * Copyright (C) 2016-2017 Broadcom Ltd.
16 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
17 * refers to Broadcom Inc. and/or its subsidiaries.
19 * Permission is hereby granted for the distribution of this firmware
20 * data in hexadecimal or equivalent format, provided this copyright
21 * notice is accompanying it.
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/stringify.h>
28 #include <linux/kernel.h>
29 #include <linux/sched/signal.h>
30 #include <linux/types.h>
31 #include <linux/compiler.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
35 #include <linux/interrupt.h>
36 #include <linux/ioport.h>
37 #include <linux/pci.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/ethtool.h>
42 #include <linux/mdio.h>
43 #include <linux/mii.h>
44 #include <linux/phy.h>
45 #include <linux/brcmphy.h>
47 #include <linux/if_vlan.h>
49 #include <linux/tcp.h>
50 #include <linux/workqueue.h>
51 #include <linux/prefetch.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/firmware.h>
54 #include <linux/ssb/ssb_driver_gige.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
58 #include <net/checksum.h>
62 #include <asm/byteorder.h>
63 #include <linux/uaccess.h>
65 #include <uapi/linux/net_tstamp.h>
66 #include <linux/ptp_clock_kernel.h>
69 #include <asm/idprom.h>
78 /* Functions & macros to verify TG3_FLAGS types */
80 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
82 return test_bit(flag, bits);
85 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
90 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
92 clear_bit(flag, bits);
95 #define tg3_flag(tp, flag) \
96 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
97 #define tg3_flag_set(tp, flag) \
98 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
99 #define tg3_flag_clear(tp, flag) \
100 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
102 #define DRV_MODULE_NAME "tg3"
103 #define TG3_MAJ_NUM 3
104 #define TG3_MIN_NUM 137
105 #define DRV_MODULE_VERSION \
106 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
107 #define DRV_MODULE_RELDATE "May 11, 2014"
109 #define RESET_KIND_SHUTDOWN 0
110 #define RESET_KIND_INIT 1
111 #define RESET_KIND_SUSPEND 2
113 #define TG3_DEF_RX_MODE 0
114 #define TG3_DEF_TX_MODE 0
115 #define TG3_DEF_MSG_ENABLE \
125 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
127 /* length of time before we decide the hardware is borked,
128 * and dev->tx_timeout() should be called to fix the problem
131 #define TG3_TX_TIMEOUT (5 * HZ)
133 /* hardware minimum and maximum for a single frame's data payload */
134 #define TG3_MIN_MTU ETH_ZLEN
135 #define TG3_MAX_MTU(tp) \
136 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
138 /* These numbers seem to be hard coded in the NIC firmware somehow.
139 * You can't change the ring sizes, but you can change where you place
140 * them in the NIC onboard memory.
142 #define TG3_RX_STD_RING_SIZE(tp) \
143 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
144 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
145 #define TG3_DEF_RX_RING_PENDING 200
146 #define TG3_RX_JMB_RING_SIZE(tp) \
147 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
148 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
149 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
151 /* Do not place this n-ring entries value into the tp struct itself,
152 * we really want to expose these constants to GCC so that modulo et
153 * al. operations are done with shifts and masks instead of with
154 * hw multiply/modulo instructions. Another solution would be to
155 * replace things like '% foo' with '& (foo - 1)'.
158 #define TG3_TX_RING_SIZE 512
159 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
161 #define TG3_RX_STD_RING_BYTES(tp) \
162 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
163 #define TG3_RX_JMB_RING_BYTES(tp) \
164 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
165 #define TG3_RX_RCB_RING_BYTES(tp) \
166 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
167 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
169 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
171 #define TG3_DMA_BYTE_ENAB 64
173 #define TG3_RX_STD_DMA_SZ 1536
174 #define TG3_RX_JMB_DMA_SZ 9046
176 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
178 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
179 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
181 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
182 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
184 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
185 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
187 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
188 * that are at least dword aligned when used in PCIX mode. The driver
189 * works around this bug by double copying the packet. This workaround
190 * is built into the normal double copy length check for efficiency.
192 * However, the double copy is only necessary on those architectures
193 * where unaligned memory accesses are inefficient. For those architectures
194 * where unaligned memory accesses incur little penalty, we can reintegrate
195 * the 5701 in the normal rx path. Doing so saves a device structure
196 * dereference by hardcoding the double copy threshold in place.
198 #define TG3_RX_COPY_THRESHOLD 256
199 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
200 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
202 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
205 #if (NET_IP_ALIGN != 0)
206 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
208 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
211 /* minimum number of free TX descriptors required to wake up TX process */
212 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
213 #define TG3_TX_BD_DMA_MAX_2K 2048
214 #define TG3_TX_BD_DMA_MAX_4K 4096
216 #define TG3_RAW_IP_ALIGN 2
218 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
219 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
221 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
222 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
224 #define FIRMWARE_TG3 "tigon/tg3.bin"
225 #define FIRMWARE_TG357766 "tigon/tg357766.bin"
226 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
227 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
229 static char version[] =
230 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
232 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
233 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
234 MODULE_LICENSE("GPL");
235 MODULE_VERSION(DRV_MODULE_VERSION);
236 MODULE_FIRMWARE(FIRMWARE_TG3);
237 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
238 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
240 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
241 module_param(tg3_debug, int, 0);
242 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
244 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
245 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
247 static const struct pci_device_id tg3_pci_tbl[] = {
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
267 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
268 TG3_DRV_DATA_FLAG_5705_10_100},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
270 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
271 TG3_DRV_DATA_FLAG_5705_10_100},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
274 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
275 TG3_DRV_DATA_FLAG_5705_10_100},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
282 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
288 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
296 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
297 PCI_VENDOR_ID_LENOVO,
298 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
299 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
302 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
311 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
312 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
313 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
314 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
315 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
316 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
320 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
321 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
322 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
323 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
324 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
325 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
326 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
330 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
332 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
340 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
341 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
342 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
343 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
344 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
345 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
346 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
347 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
348 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
349 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
350 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
351 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
352 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
353 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
354 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
355 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
356 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
357 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
358 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
359 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
360 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
361 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
362 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
366 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
368 static const struct {
369 const char string[ETH_GSTRING_LEN];
370 } ethtool_stats_keys[] = {
373 { "rx_ucast_packets" },
374 { "rx_mcast_packets" },
375 { "rx_bcast_packets" },
377 { "rx_align_errors" },
378 { "rx_xon_pause_rcvd" },
379 { "rx_xoff_pause_rcvd" },
380 { "rx_mac_ctrl_rcvd" },
381 { "rx_xoff_entered" },
382 { "rx_frame_too_long_errors" },
384 { "rx_undersize_packets" },
385 { "rx_in_length_errors" },
386 { "rx_out_length_errors" },
387 { "rx_64_or_less_octet_packets" },
388 { "rx_65_to_127_octet_packets" },
389 { "rx_128_to_255_octet_packets" },
390 { "rx_256_to_511_octet_packets" },
391 { "rx_512_to_1023_octet_packets" },
392 { "rx_1024_to_1522_octet_packets" },
393 { "rx_1523_to_2047_octet_packets" },
394 { "rx_2048_to_4095_octet_packets" },
395 { "rx_4096_to_8191_octet_packets" },
396 { "rx_8192_to_9022_octet_packets" },
403 { "tx_flow_control" },
405 { "tx_single_collisions" },
406 { "tx_mult_collisions" },
408 { "tx_excessive_collisions" },
409 { "tx_late_collisions" },
410 { "tx_collide_2times" },
411 { "tx_collide_3times" },
412 { "tx_collide_4times" },
413 { "tx_collide_5times" },
414 { "tx_collide_6times" },
415 { "tx_collide_7times" },
416 { "tx_collide_8times" },
417 { "tx_collide_9times" },
418 { "tx_collide_10times" },
419 { "tx_collide_11times" },
420 { "tx_collide_12times" },
421 { "tx_collide_13times" },
422 { "tx_collide_14times" },
423 { "tx_collide_15times" },
424 { "tx_ucast_packets" },
425 { "tx_mcast_packets" },
426 { "tx_bcast_packets" },
427 { "tx_carrier_sense_errors" },
431 { "dma_writeq_full" },
432 { "dma_write_prioq_full" },
436 { "rx_threshold_hit" },
438 { "dma_readq_full" },
439 { "dma_read_prioq_full" },
440 { "tx_comp_queue_full" },
442 { "ring_set_send_prod_index" },
443 { "ring_status_update" },
445 { "nic_avoided_irqs" },
446 { "nic_tx_threshold_hit" },
448 { "mbuf_lwm_thresh_hit" },
451 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
452 #define TG3_NVRAM_TEST 0
453 #define TG3_LINK_TEST 1
454 #define TG3_REGISTER_TEST 2
455 #define TG3_MEMORY_TEST 3
456 #define TG3_MAC_LOOPB_TEST 4
457 #define TG3_PHY_LOOPB_TEST 5
458 #define TG3_EXT_LOOPB_TEST 6
459 #define TG3_INTERRUPT_TEST 7
462 static const struct {
463 const char string[ETH_GSTRING_LEN];
464 } ethtool_test_keys[] = {
465 [TG3_NVRAM_TEST] = { "nvram test (online) " },
466 [TG3_LINK_TEST] = { "link test (online) " },
467 [TG3_REGISTER_TEST] = { "register test (offline)" },
468 [TG3_MEMORY_TEST] = { "memory test (offline)" },
469 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
470 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
471 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
472 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
475 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
478 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
480 writel(val, tp->regs + off);
483 static u32 tg3_read32(struct tg3 *tp, u32 off)
485 return readl(tp->regs + off);
488 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
490 writel(val, tp->aperegs + off);
493 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
495 return readl(tp->aperegs + off);
498 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
502 spin_lock_irqsave(&tp->indirect_lock, flags);
503 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
504 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
505 spin_unlock_irqrestore(&tp->indirect_lock, flags);
508 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
510 writel(val, tp->regs + off);
511 readl(tp->regs + off);
514 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
519 spin_lock_irqsave(&tp->indirect_lock, flags);
520 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
521 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
522 spin_unlock_irqrestore(&tp->indirect_lock, flags);
526 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
530 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
531 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
532 TG3_64BIT_REG_LOW, val);
535 if (off == TG3_RX_STD_PROD_IDX_REG) {
536 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
537 TG3_64BIT_REG_LOW, val);
541 spin_lock_irqsave(&tp->indirect_lock, flags);
542 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
543 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
544 spin_unlock_irqrestore(&tp->indirect_lock, flags);
546 /* In indirect mode when disabling interrupts, we also need
547 * to clear the interrupt bit in the GRC local ctrl register.
549 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
551 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
552 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
556 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
561 spin_lock_irqsave(&tp->indirect_lock, flags);
562 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
563 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
564 spin_unlock_irqrestore(&tp->indirect_lock, flags);
568 /* usec_wait specifies the wait time in usec when writing to certain registers
569 * where it is unsafe to read back the register without some delay.
570 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
571 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
573 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
575 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
576 /* Non-posted methods */
577 tp->write32(tp, off, val);
580 tg3_write32(tp, off, val);
585 /* Wait again after the read for the posted method to guarantee that
586 * the wait time is met.
592 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
594 tp->write32_mbox(tp, off, val);
595 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
596 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
597 !tg3_flag(tp, ICH_WORKAROUND)))
598 tp->read32_mbox(tp, off);
601 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
603 void __iomem *mbox = tp->regs + off;
605 if (tg3_flag(tp, TXD_MBOX_HWBUG))
607 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
608 tg3_flag(tp, FLUSH_POSTED_WRITES))
612 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
614 return readl(tp->regs + off + GRCMBOX_BASE);
617 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
619 writel(val, tp->regs + off + GRCMBOX_BASE);
622 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
623 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
624 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
625 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
626 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
628 #define tw32(reg, val) tp->write32(tp, reg, val)
629 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
630 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
631 #define tr32(reg) tp->read32(tp, reg)
633 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
637 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
638 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
641 spin_lock_irqsave(&tp->indirect_lock, flags);
642 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
643 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
644 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
646 /* Always leave this as zero. */
647 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
649 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
650 tw32_f(TG3PCI_MEM_WIN_DATA, val);
652 /* Always leave this as zero. */
653 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
655 spin_unlock_irqrestore(&tp->indirect_lock, flags);
658 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
662 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
663 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
668 spin_lock_irqsave(&tp->indirect_lock, flags);
669 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
670 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
671 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
673 /* Always leave this as zero. */
674 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
676 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
677 *val = tr32(TG3PCI_MEM_WIN_DATA);
679 /* Always leave this as zero. */
680 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
682 spin_unlock_irqrestore(&tp->indirect_lock, flags);
685 static void tg3_ape_lock_init(struct tg3 *tp)
690 if (tg3_asic_rev(tp) == ASIC_REV_5761)
691 regbase = TG3_APE_LOCK_GRANT;
693 regbase = TG3_APE_PER_LOCK_GRANT;
695 /* Make sure the driver hasn't any stale locks. */
696 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
698 case TG3_APE_LOCK_PHY0:
699 case TG3_APE_LOCK_PHY1:
700 case TG3_APE_LOCK_PHY2:
701 case TG3_APE_LOCK_PHY3:
702 bit = APE_LOCK_GRANT_DRIVER;
706 bit = APE_LOCK_GRANT_DRIVER;
708 bit = 1 << tp->pci_fn;
710 tg3_ape_write32(tp, regbase + 4 * i, bit);
715 static int tg3_ape_lock(struct tg3 *tp, int locknum)
719 u32 status, req, gnt, bit;
721 if (!tg3_flag(tp, ENABLE_APE))
725 case TG3_APE_LOCK_GPIO:
726 if (tg3_asic_rev(tp) == ASIC_REV_5761)
728 case TG3_APE_LOCK_GRC:
729 case TG3_APE_LOCK_MEM:
731 bit = APE_LOCK_REQ_DRIVER;
733 bit = 1 << tp->pci_fn;
735 case TG3_APE_LOCK_PHY0:
736 case TG3_APE_LOCK_PHY1:
737 case TG3_APE_LOCK_PHY2:
738 case TG3_APE_LOCK_PHY3:
739 bit = APE_LOCK_REQ_DRIVER;
745 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
746 req = TG3_APE_LOCK_REQ;
747 gnt = TG3_APE_LOCK_GRANT;
749 req = TG3_APE_PER_LOCK_REQ;
750 gnt = TG3_APE_PER_LOCK_GRANT;
755 tg3_ape_write32(tp, req + off, bit);
757 /* Wait for up to 1 millisecond to acquire lock. */
758 for (i = 0; i < 100; i++) {
759 status = tg3_ape_read32(tp, gnt + off);
762 if (pci_channel_offline(tp->pdev))
769 /* Revoke the lock request. */
770 tg3_ape_write32(tp, gnt + off, bit);
777 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
781 if (!tg3_flag(tp, ENABLE_APE))
785 case TG3_APE_LOCK_GPIO:
786 if (tg3_asic_rev(tp) == ASIC_REV_5761)
788 case TG3_APE_LOCK_GRC:
789 case TG3_APE_LOCK_MEM:
791 bit = APE_LOCK_GRANT_DRIVER;
793 bit = 1 << tp->pci_fn;
795 case TG3_APE_LOCK_PHY0:
796 case TG3_APE_LOCK_PHY1:
797 case TG3_APE_LOCK_PHY2:
798 case TG3_APE_LOCK_PHY3:
799 bit = APE_LOCK_GRANT_DRIVER;
805 if (tg3_asic_rev(tp) == ASIC_REV_5761)
806 gnt = TG3_APE_LOCK_GRANT;
808 gnt = TG3_APE_PER_LOCK_GRANT;
810 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
813 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
818 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
821 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
822 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
825 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
828 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
831 return timeout_us ? 0 : -EBUSY;
834 #ifdef CONFIG_TIGON3_HWMON
835 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
839 for (i = 0; i < timeout_us / 10; i++) {
840 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
842 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
848 return i == timeout_us / 10;
851 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
855 u32 i, bufoff, msgoff, maxlen, apedata;
857 if (!tg3_flag(tp, APE_HAS_NCSI))
860 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
861 if (apedata != APE_SEG_SIG_MAGIC)
864 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
865 if (!(apedata & APE_FW_STATUS_READY))
868 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
870 msgoff = bufoff + 2 * sizeof(u32);
871 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
876 /* Cap xfer sizes to scratchpad limits. */
877 length = (len > maxlen) ? maxlen : len;
880 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
881 if (!(apedata & APE_FW_STATUS_READY))
884 /* Wait for up to 1 msec for APE to service previous event. */
885 err = tg3_ape_event_lock(tp, 1000);
889 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
890 APE_EVENT_STATUS_SCRTCHPD_READ |
891 APE_EVENT_STATUS_EVENT_PENDING;
892 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
894 tg3_ape_write32(tp, bufoff, base_off);
895 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
897 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
898 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
902 if (tg3_ape_wait_for_event(tp, 30000))
905 for (i = 0; length; i += 4, length -= 4) {
906 u32 val = tg3_ape_read32(tp, msgoff + i);
907 memcpy(data, &val, sizeof(u32));
916 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
921 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
922 if (apedata != APE_SEG_SIG_MAGIC)
925 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
926 if (!(apedata & APE_FW_STATUS_READY))
929 /* Wait for up to 20 millisecond for APE to service previous event. */
930 err = tg3_ape_event_lock(tp, 20000);
934 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
935 event | APE_EVENT_STATUS_EVENT_PENDING);
937 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
938 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
943 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
948 if (!tg3_flag(tp, ENABLE_APE))
952 case RESET_KIND_INIT:
953 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
954 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
955 APE_HOST_SEG_SIG_MAGIC);
956 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
957 APE_HOST_SEG_LEN_MAGIC);
958 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
959 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
960 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
961 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
962 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
963 APE_HOST_BEHAV_NO_PHYLOCK);
964 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
965 TG3_APE_HOST_DRVR_STATE_START);
967 event = APE_EVENT_STATUS_STATE_START;
969 case RESET_KIND_SHUTDOWN:
970 if (device_may_wakeup(&tp->pdev->dev) &&
971 tg3_flag(tp, WOL_ENABLE)) {
972 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
973 TG3_APE_HOST_WOL_SPEED_AUTO);
974 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
976 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
978 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
980 event = APE_EVENT_STATUS_STATE_UNLOAD;
986 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
988 tg3_ape_send_event(tp, event);
991 static void tg3_send_ape_heartbeat(struct tg3 *tp,
992 unsigned long interval)
994 /* Check if hb interval has exceeded */
995 if (!tg3_flag(tp, ENABLE_APE) ||
996 time_before(jiffies, tp->ape_hb_jiffies + interval))
999 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
1000 tp->ape_hb_jiffies = jiffies;
1003 static void tg3_disable_ints(struct tg3 *tp)
1007 tw32(TG3PCI_MISC_HOST_CTRL,
1008 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1009 for (i = 0; i < tp->irq_max; i++)
1010 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1013 static void tg3_enable_ints(struct tg3 *tp)
1020 tw32(TG3PCI_MISC_HOST_CTRL,
1021 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1023 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1024 for (i = 0; i < tp->irq_cnt; i++) {
1025 struct tg3_napi *tnapi = &tp->napi[i];
1027 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1028 if (tg3_flag(tp, 1SHOT_MSI))
1029 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1031 tp->coal_now |= tnapi->coal_now;
1034 /* Force an initial interrupt */
1035 if (!tg3_flag(tp, TAGGED_STATUS) &&
1036 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1037 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1039 tw32(HOSTCC_MODE, tp->coal_now);
1041 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1044 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1046 struct tg3 *tp = tnapi->tp;
1047 struct tg3_hw_status *sblk = tnapi->hw_status;
1048 unsigned int work_exists = 0;
1050 /* check for phy events */
1051 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1052 if (sblk->status & SD_STATUS_LINK_CHG)
1056 /* check for TX work to do */
1057 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1060 /* check for RX work to do */
1061 if (tnapi->rx_rcb_prod_idx &&
1062 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1069 * similar to tg3_enable_ints, but it accurately determines whether there
1070 * is new work pending and can return without flushing the PIO write
1071 * which reenables interrupts
1073 static void tg3_int_reenable(struct tg3_napi *tnapi)
1075 struct tg3 *tp = tnapi->tp;
1077 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1080 /* When doing tagged status, this work check is unnecessary.
1081 * The last_tag we write above tells the chip which piece of
1082 * work we've completed.
1084 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1085 tw32(HOSTCC_MODE, tp->coalesce_mode |
1086 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1089 static void tg3_switch_clocks(struct tg3 *tp)
1092 u32 orig_clock_ctrl;
1094 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1097 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1099 orig_clock_ctrl = clock_ctrl;
1100 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1101 CLOCK_CTRL_CLKRUN_OENABLE |
1103 tp->pci_clock_ctrl = clock_ctrl;
1105 if (tg3_flag(tp, 5705_PLUS)) {
1106 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1107 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1108 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1110 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1111 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1113 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1115 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1116 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1119 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1122 #define PHY_BUSY_LOOPS 5000
1124 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1131 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1133 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1137 tg3_ape_lock(tp, tp->phy_ape_lock);
1141 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1142 MI_COM_PHY_ADDR_MASK);
1143 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1144 MI_COM_REG_ADDR_MASK);
1145 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1147 tw32_f(MAC_MI_COM, frame_val);
1149 loops = PHY_BUSY_LOOPS;
1150 while (loops != 0) {
1152 frame_val = tr32(MAC_MI_COM);
1154 if ((frame_val & MI_COM_BUSY) == 0) {
1156 frame_val = tr32(MAC_MI_COM);
1164 *val = frame_val & MI_COM_DATA_MASK;
1168 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1169 tw32_f(MAC_MI_MODE, tp->mi_mode);
1173 tg3_ape_unlock(tp, tp->phy_ape_lock);
1178 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1180 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1183 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1190 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1191 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1194 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1196 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1200 tg3_ape_lock(tp, tp->phy_ape_lock);
1202 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1203 MI_COM_PHY_ADDR_MASK);
1204 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1205 MI_COM_REG_ADDR_MASK);
1206 frame_val |= (val & MI_COM_DATA_MASK);
1207 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1209 tw32_f(MAC_MI_COM, frame_val);
1211 loops = PHY_BUSY_LOOPS;
1212 while (loops != 0) {
1214 frame_val = tr32(MAC_MI_COM);
1215 if ((frame_val & MI_COM_BUSY) == 0) {
1217 frame_val = tr32(MAC_MI_COM);
1227 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1228 tw32_f(MAC_MI_MODE, tp->mi_mode);
1232 tg3_ape_unlock(tp, tp->phy_ape_lock);
1237 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1239 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1242 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1246 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1250 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1254 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1255 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1259 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1265 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1269 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1273 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1277 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1278 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1282 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1288 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1292 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1294 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1299 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1303 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1305 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1310 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1314 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1315 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1316 MII_TG3_AUXCTL_SHDWSEL_MISC);
1318 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1323 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1325 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1326 set |= MII_TG3_AUXCTL_MISC_WREN;
1328 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1331 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1336 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1342 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1344 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1346 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1347 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1352 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1354 return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1355 reg | val | MII_TG3_MISC_SHDW_WREN);
1358 static int tg3_bmcr_reset(struct tg3 *tp)
1363 /* OK, reset it, and poll the BMCR_RESET bit until it
1364 * clears or we time out.
1366 phy_control = BMCR_RESET;
1367 err = tg3_writephy(tp, MII_BMCR, phy_control);
1373 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1377 if ((phy_control & BMCR_RESET) == 0) {
1389 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1391 struct tg3 *tp = bp->priv;
1394 spin_lock_bh(&tp->lock);
1396 if (__tg3_readphy(tp, mii_id, reg, &val))
1399 spin_unlock_bh(&tp->lock);
1404 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1406 struct tg3 *tp = bp->priv;
1409 spin_lock_bh(&tp->lock);
1411 if (__tg3_writephy(tp, mii_id, reg, val))
1414 spin_unlock_bh(&tp->lock);
1419 static void tg3_mdio_config_5785(struct tg3 *tp)
1422 struct phy_device *phydev;
1424 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1425 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1426 case PHY_ID_BCM50610:
1427 case PHY_ID_BCM50610M:
1428 val = MAC_PHYCFG2_50610_LED_MODES;
1430 case PHY_ID_BCMAC131:
1431 val = MAC_PHYCFG2_AC131_LED_MODES;
1433 case PHY_ID_RTL8211C:
1434 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1436 case PHY_ID_RTL8201E:
1437 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1443 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1444 tw32(MAC_PHYCFG2, val);
1446 val = tr32(MAC_PHYCFG1);
1447 val &= ~(MAC_PHYCFG1_RGMII_INT |
1448 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1449 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1450 tw32(MAC_PHYCFG1, val);
1455 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1456 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1457 MAC_PHYCFG2_FMODE_MASK_MASK |
1458 MAC_PHYCFG2_GMODE_MASK_MASK |
1459 MAC_PHYCFG2_ACT_MASK_MASK |
1460 MAC_PHYCFG2_QUAL_MASK_MASK |
1461 MAC_PHYCFG2_INBAND_ENABLE;
1463 tw32(MAC_PHYCFG2, val);
1465 val = tr32(MAC_PHYCFG1);
1466 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1467 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1468 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1469 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1470 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1471 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1472 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1474 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1475 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1476 tw32(MAC_PHYCFG1, val);
1478 val = tr32(MAC_EXT_RGMII_MODE);
1479 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1480 MAC_RGMII_MODE_RX_QUALITY |
1481 MAC_RGMII_MODE_RX_ACTIVITY |
1482 MAC_RGMII_MODE_RX_ENG_DET |
1483 MAC_RGMII_MODE_TX_ENABLE |
1484 MAC_RGMII_MODE_TX_LOWPWR |
1485 MAC_RGMII_MODE_TX_RESET);
1486 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1487 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1488 val |= MAC_RGMII_MODE_RX_INT_B |
1489 MAC_RGMII_MODE_RX_QUALITY |
1490 MAC_RGMII_MODE_RX_ACTIVITY |
1491 MAC_RGMII_MODE_RX_ENG_DET;
1492 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1493 val |= MAC_RGMII_MODE_TX_ENABLE |
1494 MAC_RGMII_MODE_TX_LOWPWR |
1495 MAC_RGMII_MODE_TX_RESET;
1497 tw32(MAC_EXT_RGMII_MODE, val);
1500 static void tg3_mdio_start(struct tg3 *tp)
1502 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1503 tw32_f(MAC_MI_MODE, tp->mi_mode);
1506 if (tg3_flag(tp, MDIOBUS_INITED) &&
1507 tg3_asic_rev(tp) == ASIC_REV_5785)
1508 tg3_mdio_config_5785(tp);
1511 static int tg3_mdio_init(struct tg3 *tp)
1515 struct phy_device *phydev;
1517 if (tg3_flag(tp, 5717_PLUS)) {
1520 tp->phy_addr = tp->pci_fn + 1;
1522 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1523 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1525 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1526 TG3_CPMU_PHY_STRAP_IS_SERDES;
1529 } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1532 addr = ssb_gige_get_phyaddr(tp->pdev);
1535 tp->phy_addr = addr;
1537 tp->phy_addr = TG3_PHY_MII_ADDR;
1541 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1544 tp->mdio_bus = mdiobus_alloc();
1545 if (tp->mdio_bus == NULL)
1548 tp->mdio_bus->name = "tg3 mdio bus";
1549 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1550 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1551 tp->mdio_bus->priv = tp;
1552 tp->mdio_bus->parent = &tp->pdev->dev;
1553 tp->mdio_bus->read = &tg3_mdio_read;
1554 tp->mdio_bus->write = &tg3_mdio_write;
1555 tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1557 /* The bus registration will look for all the PHYs on the mdio bus.
1558 * Unfortunately, it does not ensure the PHY is powered up before
1559 * accessing the PHY ID registers. A chip reset is the
1560 * quickest way to bring the device back to an operational state..
1562 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1565 i = mdiobus_register(tp->mdio_bus);
1567 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1568 mdiobus_free(tp->mdio_bus);
1572 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1574 if (!phydev || !phydev->drv) {
1575 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1576 mdiobus_unregister(tp->mdio_bus);
1577 mdiobus_free(tp->mdio_bus);
1581 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1582 case PHY_ID_BCM57780:
1583 phydev->interface = PHY_INTERFACE_MODE_GMII;
1584 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1586 case PHY_ID_BCM50610:
1587 case PHY_ID_BCM50610M:
1588 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1589 PHY_BRCM_RX_REFCLK_UNUSED |
1590 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1591 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1592 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1593 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1594 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1595 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1596 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1597 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1599 case PHY_ID_RTL8211C:
1600 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1602 case PHY_ID_RTL8201E:
1603 case PHY_ID_BCMAC131:
1604 phydev->interface = PHY_INTERFACE_MODE_MII;
1605 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1606 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1610 tg3_flag_set(tp, MDIOBUS_INITED);
1612 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1613 tg3_mdio_config_5785(tp);
1618 static void tg3_mdio_fini(struct tg3 *tp)
1620 if (tg3_flag(tp, MDIOBUS_INITED)) {
1621 tg3_flag_clear(tp, MDIOBUS_INITED);
1622 mdiobus_unregister(tp->mdio_bus);
1623 mdiobus_free(tp->mdio_bus);
1627 /* tp->lock is held. */
1628 static inline void tg3_generate_fw_event(struct tg3 *tp)
1632 val = tr32(GRC_RX_CPU_EVENT);
1633 val |= GRC_RX_CPU_DRIVER_EVENT;
1634 tw32_f(GRC_RX_CPU_EVENT, val);
1636 tp->last_event_jiffies = jiffies;
1639 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1641 /* tp->lock is held. */
1642 static void tg3_wait_for_event_ack(struct tg3 *tp)
1645 unsigned int delay_cnt;
1648 /* If enough time has passed, no wait is necessary. */
1649 time_remain = (long)(tp->last_event_jiffies + 1 +
1650 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1652 if (time_remain < 0)
1655 /* Check if we can shorten the wait time. */
1656 delay_cnt = jiffies_to_usecs(time_remain);
1657 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1658 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1659 delay_cnt = (delay_cnt >> 3) + 1;
1661 for (i = 0; i < delay_cnt; i++) {
1662 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1664 if (pci_channel_offline(tp->pdev))
1671 /* tp->lock is held. */
1672 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1677 if (!tg3_readphy(tp, MII_BMCR, ®))
1679 if (!tg3_readphy(tp, MII_BMSR, ®))
1680 val |= (reg & 0xffff);
1684 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1686 if (!tg3_readphy(tp, MII_LPA, ®))
1687 val |= (reg & 0xffff);
1691 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1692 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1694 if (!tg3_readphy(tp, MII_STAT1000, ®))
1695 val |= (reg & 0xffff);
1699 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1706 /* tp->lock is held. */
1707 static void tg3_ump_link_report(struct tg3 *tp)
1711 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1714 tg3_phy_gather_ump_data(tp, data);
1716 tg3_wait_for_event_ack(tp);
1718 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1719 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1720 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1721 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1722 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1723 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1725 tg3_generate_fw_event(tp);
1728 /* tp->lock is held. */
1729 static void tg3_stop_fw(struct tg3 *tp)
1731 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1732 /* Wait for RX cpu to ACK the previous event. */
1733 tg3_wait_for_event_ack(tp);
1735 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1737 tg3_generate_fw_event(tp);
1739 /* Wait for RX cpu to ACK this event. */
1740 tg3_wait_for_event_ack(tp);
1744 /* tp->lock is held. */
1745 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1747 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1748 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1750 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1752 case RESET_KIND_INIT:
1753 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1757 case RESET_KIND_SHUTDOWN:
1758 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1762 case RESET_KIND_SUSPEND:
1763 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1773 /* tp->lock is held. */
1774 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1776 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1778 case RESET_KIND_INIT:
1779 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1780 DRV_STATE_START_DONE);
1783 case RESET_KIND_SHUTDOWN:
1784 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1785 DRV_STATE_UNLOAD_DONE);
1794 /* tp->lock is held. */
1795 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1797 if (tg3_flag(tp, ENABLE_ASF)) {
1799 case RESET_KIND_INIT:
1800 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1804 case RESET_KIND_SHUTDOWN:
1805 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1809 case RESET_KIND_SUSPEND:
1810 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1820 static int tg3_poll_fw(struct tg3 *tp)
1825 if (tg3_flag(tp, NO_FWARE_REPORTED))
1828 if (tg3_flag(tp, IS_SSB_CORE)) {
1829 /* We don't use firmware. */
1833 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1834 /* Wait up to 20ms for init done. */
1835 for (i = 0; i < 200; i++) {
1836 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1838 if (pci_channel_offline(tp->pdev))
1846 /* Wait for firmware initialization to complete. */
1847 for (i = 0; i < 100000; i++) {
1848 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1849 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1851 if (pci_channel_offline(tp->pdev)) {
1852 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1853 tg3_flag_set(tp, NO_FWARE_REPORTED);
1854 netdev_info(tp->dev, "No firmware running\n");
1863 /* Chip might not be fitted with firmware. Some Sun onboard
1864 * parts are configured like that. So don't signal the timeout
1865 * of the above loop as an error, but do report the lack of
1866 * running firmware once.
1868 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1869 tg3_flag_set(tp, NO_FWARE_REPORTED);
1871 netdev_info(tp->dev, "No firmware running\n");
1874 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1875 /* The 57765 A0 needs a little more
1876 * time to do some important work.
1884 static void tg3_link_report(struct tg3 *tp)
1886 if (!netif_carrier_ok(tp->dev)) {
1887 netif_info(tp, link, tp->dev, "Link is down\n");
1888 tg3_ump_link_report(tp);
1889 } else if (netif_msg_link(tp)) {
1890 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1891 (tp->link_config.active_speed == SPEED_1000 ?
1893 (tp->link_config.active_speed == SPEED_100 ?
1895 (tp->link_config.active_duplex == DUPLEX_FULL ?
1898 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1899 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1901 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1904 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1905 netdev_info(tp->dev, "EEE is %s\n",
1906 tp->setlpicnt ? "enabled" : "disabled");
1908 tg3_ump_link_report(tp);
1911 tp->link_up = netif_carrier_ok(tp->dev);
1914 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1918 if (adv & ADVERTISE_PAUSE_CAP) {
1919 flowctrl |= FLOW_CTRL_RX;
1920 if (!(adv & ADVERTISE_PAUSE_ASYM))
1921 flowctrl |= FLOW_CTRL_TX;
1922 } else if (adv & ADVERTISE_PAUSE_ASYM)
1923 flowctrl |= FLOW_CTRL_TX;
1928 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1932 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1933 miireg = ADVERTISE_1000XPAUSE;
1934 else if (flow_ctrl & FLOW_CTRL_TX)
1935 miireg = ADVERTISE_1000XPSE_ASYM;
1936 else if (flow_ctrl & FLOW_CTRL_RX)
1937 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1944 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1948 if (adv & ADVERTISE_1000XPAUSE) {
1949 flowctrl |= FLOW_CTRL_RX;
1950 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1951 flowctrl |= FLOW_CTRL_TX;
1952 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1953 flowctrl |= FLOW_CTRL_TX;
1958 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1962 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1963 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1964 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1965 if (lcladv & ADVERTISE_1000XPAUSE)
1967 if (rmtadv & ADVERTISE_1000XPAUSE)
1974 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1978 u32 old_rx_mode = tp->rx_mode;
1979 u32 old_tx_mode = tp->tx_mode;
1981 if (tg3_flag(tp, USE_PHYLIB))
1982 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1984 autoneg = tp->link_config.autoneg;
1986 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1987 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1988 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1990 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1992 flowctrl = tp->link_config.flowctrl;
1994 tp->link_config.active_flowctrl = flowctrl;
1996 if (flowctrl & FLOW_CTRL_RX)
1997 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1999 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
2001 if (old_rx_mode != tp->rx_mode)
2002 tw32_f(MAC_RX_MODE, tp->rx_mode);
2004 if (flowctrl & FLOW_CTRL_TX)
2005 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
2007 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2009 if (old_tx_mode != tp->tx_mode)
2010 tw32_f(MAC_TX_MODE, tp->tx_mode);
2013 static void tg3_adjust_link(struct net_device *dev)
2015 u8 oldflowctrl, linkmesg = 0;
2016 u32 mac_mode, lcl_adv, rmt_adv;
2017 struct tg3 *tp = netdev_priv(dev);
2018 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2020 spin_lock_bh(&tp->lock);
2022 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2023 MAC_MODE_HALF_DUPLEX);
2025 oldflowctrl = tp->link_config.active_flowctrl;
2031 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2032 mac_mode |= MAC_MODE_PORT_MODE_MII;
2033 else if (phydev->speed == SPEED_1000 ||
2034 tg3_asic_rev(tp) != ASIC_REV_5785)
2035 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2037 mac_mode |= MAC_MODE_PORT_MODE_MII;
2039 if (phydev->duplex == DUPLEX_HALF)
2040 mac_mode |= MAC_MODE_HALF_DUPLEX;
2042 lcl_adv = mii_advertise_flowctrl(
2043 tp->link_config.flowctrl);
2046 rmt_adv = LPA_PAUSE_CAP;
2047 if (phydev->asym_pause)
2048 rmt_adv |= LPA_PAUSE_ASYM;
2051 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2053 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2055 if (mac_mode != tp->mac_mode) {
2056 tp->mac_mode = mac_mode;
2057 tw32_f(MAC_MODE, tp->mac_mode);
2061 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2062 if (phydev->speed == SPEED_10)
2064 MAC_MI_STAT_10MBPS_MODE |
2065 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2067 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2070 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2071 tw32(MAC_TX_LENGTHS,
2072 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2073 (6 << TX_LENGTHS_IPG_SHIFT) |
2074 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2076 tw32(MAC_TX_LENGTHS,
2077 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2078 (6 << TX_LENGTHS_IPG_SHIFT) |
2079 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2081 if (phydev->link != tp->old_link ||
2082 phydev->speed != tp->link_config.active_speed ||
2083 phydev->duplex != tp->link_config.active_duplex ||
2084 oldflowctrl != tp->link_config.active_flowctrl)
2087 tp->old_link = phydev->link;
2088 tp->link_config.active_speed = phydev->speed;
2089 tp->link_config.active_duplex = phydev->duplex;
2091 spin_unlock_bh(&tp->lock);
2094 tg3_link_report(tp);
2097 static int tg3_phy_init(struct tg3 *tp)
2099 struct phy_device *phydev;
2101 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2104 /* Bring the PHY back to a known state. */
2107 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2109 /* Attach the MAC to the PHY. */
2110 phydev = phy_connect(tp->dev, phydev_name(phydev),
2111 tg3_adjust_link, phydev->interface);
2112 if (IS_ERR(phydev)) {
2113 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2114 return PTR_ERR(phydev);
2117 /* Mask with MAC supported features. */
2118 switch (phydev->interface) {
2119 case PHY_INTERFACE_MODE_GMII:
2120 case PHY_INTERFACE_MODE_RGMII:
2121 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2122 phydev->supported &= (PHY_GBIT_FEATURES |
2124 SUPPORTED_Asym_Pause);
2128 case PHY_INTERFACE_MODE_MII:
2129 phydev->supported &= (PHY_BASIC_FEATURES |
2131 SUPPORTED_Asym_Pause);
2134 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2138 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2140 phydev->advertising = phydev->supported;
2142 phy_attached_info(phydev);
2147 static void tg3_phy_start(struct tg3 *tp)
2149 struct phy_device *phydev;
2151 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2154 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2156 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2157 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2158 phydev->speed = tp->link_config.speed;
2159 phydev->duplex = tp->link_config.duplex;
2160 phydev->autoneg = tp->link_config.autoneg;
2161 phydev->advertising = tp->link_config.advertising;
2166 phy_start_aneg(phydev);
2169 static void tg3_phy_stop(struct tg3 *tp)
2171 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2174 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2177 static void tg3_phy_fini(struct tg3 *tp)
2179 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2180 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2181 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2185 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2190 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2193 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2194 /* Cannot do read-modify-write on 5401 */
2195 err = tg3_phy_auxctl_write(tp,
2196 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2197 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2202 err = tg3_phy_auxctl_read(tp,
2203 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2207 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2208 err = tg3_phy_auxctl_write(tp,
2209 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2215 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2219 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2222 tg3_writephy(tp, MII_TG3_FET_TEST,
2223 phytest | MII_TG3_FET_SHADOW_EN);
2224 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2226 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2228 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2229 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2231 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2235 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2239 if (!tg3_flag(tp, 5705_PLUS) ||
2240 (tg3_flag(tp, 5717_PLUS) &&
2241 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2244 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2245 tg3_phy_fet_toggle_apd(tp, enable);
2249 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2250 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2251 MII_TG3_MISC_SHDW_SCR5_SDTL |
2252 MII_TG3_MISC_SHDW_SCR5_C125OE;
2253 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2254 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2256 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2259 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2261 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2263 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2266 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2270 if (!tg3_flag(tp, 5705_PLUS) ||
2271 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2274 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2277 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2278 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2280 tg3_writephy(tp, MII_TG3_FET_TEST,
2281 ephy | MII_TG3_FET_SHADOW_EN);
2282 if (!tg3_readphy(tp, reg, &phy)) {
2284 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2286 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2287 tg3_writephy(tp, reg, phy);
2289 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2294 ret = tg3_phy_auxctl_read(tp,
2295 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2298 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2300 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2301 tg3_phy_auxctl_write(tp,
2302 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2307 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2312 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2315 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2317 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2318 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2321 static void tg3_phy_apply_otp(struct tg3 *tp)
2330 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2333 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2334 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2335 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2337 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2338 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2339 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2341 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2342 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2343 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2345 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2346 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2348 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2349 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2351 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2352 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2353 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2355 tg3_phy_toggle_auxctl_smdsp(tp, false);
2358 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2361 struct ethtool_eee *dest = &tp->eee;
2363 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2369 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2372 /* Pull eee_active */
2373 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2374 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2375 dest->eee_active = 1;
2377 dest->eee_active = 0;
2379 /* Pull lp advertised settings */
2380 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2382 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2384 /* Pull advertised and eee_enabled settings */
2385 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2387 dest->eee_enabled = !!val;
2388 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2390 /* Pull tx_lpi_enabled */
2391 val = tr32(TG3_CPMU_EEE_MODE);
2392 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2394 /* Pull lpi timer value */
2395 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2398 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2402 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2407 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2409 tp->link_config.active_duplex == DUPLEX_FULL &&
2410 (tp->link_config.active_speed == SPEED_100 ||
2411 tp->link_config.active_speed == SPEED_1000)) {
2414 if (tp->link_config.active_speed == SPEED_1000)
2415 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2417 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2419 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2421 tg3_eee_pull_config(tp, NULL);
2422 if (tp->eee.eee_active)
2426 if (!tp->setlpicnt) {
2427 if (current_link_up &&
2428 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2429 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2430 tg3_phy_toggle_auxctl_smdsp(tp, false);
2433 val = tr32(TG3_CPMU_EEE_MODE);
2434 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2438 static void tg3_phy_eee_enable(struct tg3 *tp)
2442 if (tp->link_config.active_speed == SPEED_1000 &&
2443 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2444 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2445 tg3_flag(tp, 57765_CLASS)) &&
2446 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2447 val = MII_TG3_DSP_TAP26_ALNOKO |
2448 MII_TG3_DSP_TAP26_RMRXSTO;
2449 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2450 tg3_phy_toggle_auxctl_smdsp(tp, false);
2453 val = tr32(TG3_CPMU_EEE_MODE);
2454 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2457 static int tg3_wait_macro_done(struct tg3 *tp)
2464 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2465 if ((tmp32 & 0x1000) == 0)
2475 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2477 static const u32 test_pat[4][6] = {
2478 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2479 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2480 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2481 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2485 for (chan = 0; chan < 4; chan++) {
2488 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2489 (chan * 0x2000) | 0x0200);
2490 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2492 for (i = 0; i < 6; i++)
2493 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2496 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2497 if (tg3_wait_macro_done(tp)) {
2502 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2503 (chan * 0x2000) | 0x0200);
2504 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2505 if (tg3_wait_macro_done(tp)) {
2510 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2511 if (tg3_wait_macro_done(tp)) {
2516 for (i = 0; i < 6; i += 2) {
2519 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2520 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2521 tg3_wait_macro_done(tp)) {
2527 if (low != test_pat[chan][i] ||
2528 high != test_pat[chan][i+1]) {
2529 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2530 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2531 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2541 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2545 for (chan = 0; chan < 4; chan++) {
2548 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2549 (chan * 0x2000) | 0x0200);
2550 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2551 for (i = 0; i < 6; i++)
2552 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2553 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2554 if (tg3_wait_macro_done(tp))
2561 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2563 u32 reg32, phy9_orig;
2564 int retries, do_phy_reset, err;
2570 err = tg3_bmcr_reset(tp);
2576 /* Disable transmitter and interrupt. */
2577 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
2581 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2583 /* Set full-duplex, 1000 mbps. */
2584 tg3_writephy(tp, MII_BMCR,
2585 BMCR_FULLDPLX | BMCR_SPEED1000);
2587 /* Set to master mode. */
2588 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2591 tg3_writephy(tp, MII_CTRL1000,
2592 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2594 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2598 /* Block the PHY control access. */
2599 tg3_phydsp_write(tp, 0x8005, 0x0800);
2601 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2604 } while (--retries);
2606 err = tg3_phy_reset_chanpat(tp);
2610 tg3_phydsp_write(tp, 0x8005, 0x0000);
2612 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2613 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2615 tg3_phy_toggle_auxctl_smdsp(tp, false);
2617 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2619 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32);
2624 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2629 static void tg3_carrier_off(struct tg3 *tp)
2631 netif_carrier_off(tp->dev);
2632 tp->link_up = false;
2635 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2637 if (tg3_flag(tp, ENABLE_ASF))
2638 netdev_warn(tp->dev,
2639 "Management side-band traffic will be interrupted during phy settings change\n");
2642 /* This will reset the tigon3 PHY if there is no valid
2643 * link unless the FORCE argument is non-zero.
2645 static int tg3_phy_reset(struct tg3 *tp)
2650 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2651 val = tr32(GRC_MISC_CFG);
2652 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2655 err = tg3_readphy(tp, MII_BMSR, &val);
2656 err |= tg3_readphy(tp, MII_BMSR, &val);
2660 if (netif_running(tp->dev) && tp->link_up) {
2661 netif_carrier_off(tp->dev);
2662 tg3_link_report(tp);
2665 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2666 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2667 tg3_asic_rev(tp) == ASIC_REV_5705) {
2668 err = tg3_phy_reset_5703_4_5(tp);
2675 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2676 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2677 cpmuctrl = tr32(TG3_CPMU_CTRL);
2678 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2680 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2683 err = tg3_bmcr_reset(tp);
2687 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2688 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2689 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2691 tw32(TG3_CPMU_CTRL, cpmuctrl);
2694 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2695 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2696 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2697 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2698 CPMU_LSPD_1000MB_MACCLK_12_5) {
2699 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2701 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2705 if (tg3_flag(tp, 5717_PLUS) &&
2706 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2709 tg3_phy_apply_otp(tp);
2711 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2712 tg3_phy_toggle_apd(tp, true);
2714 tg3_phy_toggle_apd(tp, false);
2717 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2718 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2719 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2720 tg3_phydsp_write(tp, 0x000a, 0x0323);
2721 tg3_phy_toggle_auxctl_smdsp(tp, false);
2724 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2725 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2726 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2729 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2730 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2731 tg3_phydsp_write(tp, 0x000a, 0x310b);
2732 tg3_phydsp_write(tp, 0x201f, 0x9506);
2733 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2734 tg3_phy_toggle_auxctl_smdsp(tp, false);
2736 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2737 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2738 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2739 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2740 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2741 tg3_writephy(tp, MII_TG3_TEST1,
2742 MII_TG3_TEST1_TRIM_EN | 0x4);
2744 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2746 tg3_phy_toggle_auxctl_smdsp(tp, false);
2750 /* Set Extended packet length bit (bit 14) on all chips that */
2751 /* support jumbo frames */
2752 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2753 /* Cannot do read-modify-write on 5401 */
2754 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2755 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2756 /* Set bit 14 with read-modify-write to preserve other bits */
2757 err = tg3_phy_auxctl_read(tp,
2758 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2760 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2761 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2764 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2765 * jumbo frames transmission.
2767 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2768 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2769 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2770 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2773 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2774 /* adjust output voltage */
2775 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2778 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2779 tg3_phydsp_write(tp, 0xffb, 0x4000);
2781 tg3_phy_toggle_automdix(tp, true);
2782 tg3_phy_set_wirespeed(tp);
2786 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2787 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2788 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2789 TG3_GPIO_MSG_NEED_VAUX)
2790 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2791 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2792 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2793 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2794 (TG3_GPIO_MSG_DRVR_PRES << 12))
2796 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2797 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2798 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2799 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2800 (TG3_GPIO_MSG_NEED_VAUX << 12))
2802 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2806 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2807 tg3_asic_rev(tp) == ASIC_REV_5719)
2808 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2810 status = tr32(TG3_CPMU_DRV_STATUS);
2812 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2813 status &= ~(TG3_GPIO_MSG_MASK << shift);
2814 status |= (newstat << shift);
2816 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2817 tg3_asic_rev(tp) == ASIC_REV_5719)
2818 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2820 tw32(TG3_CPMU_DRV_STATUS, status);
2822 return status >> TG3_APE_GPIO_MSG_SHIFT;
2825 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2827 if (!tg3_flag(tp, IS_NIC))
2830 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2831 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2832 tg3_asic_rev(tp) == ASIC_REV_5720) {
2833 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2836 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2838 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2839 TG3_GRC_LCLCTL_PWRSW_DELAY);
2841 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2843 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2844 TG3_GRC_LCLCTL_PWRSW_DELAY);
2850 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2854 if (!tg3_flag(tp, IS_NIC) ||
2855 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2856 tg3_asic_rev(tp) == ASIC_REV_5701)
2859 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2861 tw32_wait_f(GRC_LOCAL_CTRL,
2862 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2863 TG3_GRC_LCLCTL_PWRSW_DELAY);
2865 tw32_wait_f(GRC_LOCAL_CTRL,
2867 TG3_GRC_LCLCTL_PWRSW_DELAY);
2869 tw32_wait_f(GRC_LOCAL_CTRL,
2870 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2871 TG3_GRC_LCLCTL_PWRSW_DELAY);
2874 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2876 if (!tg3_flag(tp, IS_NIC))
2879 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2880 tg3_asic_rev(tp) == ASIC_REV_5701) {
2881 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2882 (GRC_LCLCTRL_GPIO_OE0 |
2883 GRC_LCLCTRL_GPIO_OE1 |
2884 GRC_LCLCTRL_GPIO_OE2 |
2885 GRC_LCLCTRL_GPIO_OUTPUT0 |
2886 GRC_LCLCTRL_GPIO_OUTPUT1),
2887 TG3_GRC_LCLCTL_PWRSW_DELAY);
2888 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2889 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2890 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2891 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2892 GRC_LCLCTRL_GPIO_OE1 |
2893 GRC_LCLCTRL_GPIO_OE2 |
2894 GRC_LCLCTRL_GPIO_OUTPUT0 |
2895 GRC_LCLCTRL_GPIO_OUTPUT1 |
2897 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2898 TG3_GRC_LCLCTL_PWRSW_DELAY);
2900 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2901 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2902 TG3_GRC_LCLCTL_PWRSW_DELAY);
2904 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2905 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2906 TG3_GRC_LCLCTL_PWRSW_DELAY);
2909 u32 grc_local_ctrl = 0;
2911 /* Workaround to prevent overdrawing Amps. */
2912 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2913 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2914 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2916 TG3_GRC_LCLCTL_PWRSW_DELAY);
2919 /* On 5753 and variants, GPIO2 cannot be used. */
2920 no_gpio2 = tp->nic_sram_data_cfg &
2921 NIC_SRAM_DATA_CFG_NO_GPIO2;
2923 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2924 GRC_LCLCTRL_GPIO_OE1 |
2925 GRC_LCLCTRL_GPIO_OE2 |
2926 GRC_LCLCTRL_GPIO_OUTPUT1 |
2927 GRC_LCLCTRL_GPIO_OUTPUT2;
2929 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2930 GRC_LCLCTRL_GPIO_OUTPUT2);
2932 tw32_wait_f(GRC_LOCAL_CTRL,
2933 tp->grc_local_ctrl | grc_local_ctrl,
2934 TG3_GRC_LCLCTL_PWRSW_DELAY);
2936 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2938 tw32_wait_f(GRC_LOCAL_CTRL,
2939 tp->grc_local_ctrl | grc_local_ctrl,
2940 TG3_GRC_LCLCTL_PWRSW_DELAY);
2943 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2944 tw32_wait_f(GRC_LOCAL_CTRL,
2945 tp->grc_local_ctrl | grc_local_ctrl,
2946 TG3_GRC_LCLCTL_PWRSW_DELAY);
2951 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2955 /* Serialize power state transitions */
2956 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2959 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2960 msg = TG3_GPIO_MSG_NEED_VAUX;
2962 msg = tg3_set_function_status(tp, msg);
2964 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2967 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2968 tg3_pwrsrc_switch_to_vaux(tp);
2970 tg3_pwrsrc_die_with_vmain(tp);
2973 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2976 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2978 bool need_vaux = false;
2980 /* The GPIOs do something completely different on 57765. */
2981 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2984 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2985 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2986 tg3_asic_rev(tp) == ASIC_REV_5720) {
2987 tg3_frob_aux_power_5717(tp, include_wol ?
2988 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2992 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2993 struct net_device *dev_peer;
2995 dev_peer = pci_get_drvdata(tp->pdev_peer);
2997 /* remove_one() may have been run on the peer. */
2999 struct tg3 *tp_peer = netdev_priv(dev_peer);
3001 if (tg3_flag(tp_peer, INIT_COMPLETE))
3004 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
3005 tg3_flag(tp_peer, ENABLE_ASF))
3010 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3011 tg3_flag(tp, ENABLE_ASF))
3015 tg3_pwrsrc_switch_to_vaux(tp);
3017 tg3_pwrsrc_die_with_vmain(tp);
3020 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3022 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3024 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3025 if (speed != SPEED_10)
3027 } else if (speed == SPEED_10)
3033 static bool tg3_phy_power_bug(struct tg3 *tp)
3035 switch (tg3_asic_rev(tp)) {
3040 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3049 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3058 static bool tg3_phy_led_bug(struct tg3 *tp)
3060 switch (tg3_asic_rev(tp)) {
3063 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3072 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3076 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3079 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3080 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3081 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3082 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3085 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3086 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3087 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3092 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3094 val = tr32(GRC_MISC_CFG);
3095 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3098 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3100 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3103 tg3_writephy(tp, MII_ADVERTISE, 0);
3104 tg3_writephy(tp, MII_BMCR,
3105 BMCR_ANENABLE | BMCR_ANRESTART);
3107 tg3_writephy(tp, MII_TG3_FET_TEST,
3108 phytest | MII_TG3_FET_SHADOW_EN);
3109 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3110 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3112 MII_TG3_FET_SHDW_AUXMODE4,
3115 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3118 } else if (do_low_power) {
3119 if (!tg3_phy_led_bug(tp))
3120 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3121 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3123 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3124 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3125 MII_TG3_AUXCTL_PCTL_VREG_11V;
3126 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3129 /* The PHY should not be powered down on some chips because
3132 if (tg3_phy_power_bug(tp))
3135 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3136 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3137 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3138 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3139 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3140 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3143 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3146 /* tp->lock is held. */
3147 static int tg3_nvram_lock(struct tg3 *tp)
3149 if (tg3_flag(tp, NVRAM)) {
3152 if (tp->nvram_lock_cnt == 0) {
3153 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3154 for (i = 0; i < 8000; i++) {
3155 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3160 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3164 tp->nvram_lock_cnt++;
3169 /* tp->lock is held. */
3170 static void tg3_nvram_unlock(struct tg3 *tp)
3172 if (tg3_flag(tp, NVRAM)) {
3173 if (tp->nvram_lock_cnt > 0)
3174 tp->nvram_lock_cnt--;
3175 if (tp->nvram_lock_cnt == 0)
3176 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3180 /* tp->lock is held. */
3181 static void tg3_enable_nvram_access(struct tg3 *tp)
3183 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3184 u32 nvaccess = tr32(NVRAM_ACCESS);
3186 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3190 /* tp->lock is held. */
3191 static void tg3_disable_nvram_access(struct tg3 *tp)
3193 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3194 u32 nvaccess = tr32(NVRAM_ACCESS);
3196 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3200 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3201 u32 offset, u32 *val)
3206 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3209 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3210 EEPROM_ADDR_DEVID_MASK |
3212 tw32(GRC_EEPROM_ADDR,
3214 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3215 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3216 EEPROM_ADDR_ADDR_MASK) |
3217 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3219 for (i = 0; i < 1000; i++) {
3220 tmp = tr32(GRC_EEPROM_ADDR);
3222 if (tmp & EEPROM_ADDR_COMPLETE)
3226 if (!(tmp & EEPROM_ADDR_COMPLETE))
3229 tmp = tr32(GRC_EEPROM_DATA);
3232 * The data will always be opposite the native endian
3233 * format. Perform a blind byteswap to compensate.
3240 #define NVRAM_CMD_TIMEOUT 10000
3242 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3246 tw32(NVRAM_CMD, nvram_cmd);
3247 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3248 usleep_range(10, 40);
3249 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3255 if (i == NVRAM_CMD_TIMEOUT)
3261 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3263 if (tg3_flag(tp, NVRAM) &&
3264 tg3_flag(tp, NVRAM_BUFFERED) &&
3265 tg3_flag(tp, FLASH) &&
3266 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3267 (tp->nvram_jedecnum == JEDEC_ATMEL))
3269 addr = ((addr / tp->nvram_pagesize) <<
3270 ATMEL_AT45DB0X1B_PAGE_POS) +
3271 (addr % tp->nvram_pagesize);
3276 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3278 if (tg3_flag(tp, NVRAM) &&
3279 tg3_flag(tp, NVRAM_BUFFERED) &&
3280 tg3_flag(tp, FLASH) &&
3281 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3282 (tp->nvram_jedecnum == JEDEC_ATMEL))
3284 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3285 tp->nvram_pagesize) +
3286 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3291 /* NOTE: Data read in from NVRAM is byteswapped according to
3292 * the byteswapping settings for all other register accesses.
3293 * tg3 devices are BE devices, so on a BE machine, the data
3294 * returned will be exactly as it is seen in NVRAM. On a LE
3295 * machine, the 32-bit value will be byteswapped.
3297 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3301 if (!tg3_flag(tp, NVRAM))
3302 return tg3_nvram_read_using_eeprom(tp, offset, val);
3304 offset = tg3_nvram_phys_addr(tp, offset);
3306 if (offset > NVRAM_ADDR_MSK)
3309 ret = tg3_nvram_lock(tp);
3313 tg3_enable_nvram_access(tp);
3315 tw32(NVRAM_ADDR, offset);
3316 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3317 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3320 *val = tr32(NVRAM_RDDATA);
3322 tg3_disable_nvram_access(tp);
3324 tg3_nvram_unlock(tp);
3329 /* Ensures NVRAM data is in bytestream format. */
3330 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3333 int res = tg3_nvram_read(tp, offset, &v);
3335 *val = cpu_to_be32(v);
3339 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3340 u32 offset, u32 len, u8 *buf)
3345 for (i = 0; i < len; i += 4) {
3351 memcpy(&data, buf + i, 4);
3354 * The SEEPROM interface expects the data to always be opposite
3355 * the native endian format. We accomplish this by reversing
3356 * all the operations that would have been performed on the
3357 * data from a call to tg3_nvram_read_be32().
3359 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3361 val = tr32(GRC_EEPROM_ADDR);
3362 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3364 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3366 tw32(GRC_EEPROM_ADDR, val |
3367 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3368 (addr & EEPROM_ADDR_ADDR_MASK) |
3372 for (j = 0; j < 1000; j++) {
3373 val = tr32(GRC_EEPROM_ADDR);
3375 if (val & EEPROM_ADDR_COMPLETE)
3379 if (!(val & EEPROM_ADDR_COMPLETE)) {
3388 /* offset and length are dword aligned */
3389 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3393 u32 pagesize = tp->nvram_pagesize;
3394 u32 pagemask = pagesize - 1;
3398 tmp = kmalloc(pagesize, GFP_KERNEL);
3404 u32 phy_addr, page_off, size;
3406 phy_addr = offset & ~pagemask;
3408 for (j = 0; j < pagesize; j += 4) {
3409 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3410 (__be32 *) (tmp + j));
3417 page_off = offset & pagemask;
3424 memcpy(tmp + page_off, buf, size);
3426 offset = offset + (pagesize - page_off);
3428 tg3_enable_nvram_access(tp);
3431 * Before we can erase the flash page, we need
3432 * to issue a special "write enable" command.
3434 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3436 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3439 /* Erase the target page */
3440 tw32(NVRAM_ADDR, phy_addr);
3442 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3443 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3445 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3448 /* Issue another write enable to start the write. */
3449 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3451 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3454 for (j = 0; j < pagesize; j += 4) {
3457 data = *((__be32 *) (tmp + j));
3459 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3461 tw32(NVRAM_ADDR, phy_addr + j);
3463 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3467 nvram_cmd |= NVRAM_CMD_FIRST;
3468 else if (j == (pagesize - 4))
3469 nvram_cmd |= NVRAM_CMD_LAST;
3471 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3479 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3480 tg3_nvram_exec_cmd(tp, nvram_cmd);
3487 /* offset and length are dword aligned */
3488 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3493 for (i = 0; i < len; i += 4, offset += 4) {
3494 u32 page_off, phy_addr, nvram_cmd;
3497 memcpy(&data, buf + i, 4);
3498 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3500 page_off = offset % tp->nvram_pagesize;
3502 phy_addr = tg3_nvram_phys_addr(tp, offset);
3504 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3506 if (page_off == 0 || i == 0)
3507 nvram_cmd |= NVRAM_CMD_FIRST;
3508 if (page_off == (tp->nvram_pagesize - 4))
3509 nvram_cmd |= NVRAM_CMD_LAST;
3512 nvram_cmd |= NVRAM_CMD_LAST;
3514 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3515 !tg3_flag(tp, FLASH) ||
3516 !tg3_flag(tp, 57765_PLUS))
3517 tw32(NVRAM_ADDR, phy_addr);
3519 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3520 !tg3_flag(tp, 5755_PLUS) &&
3521 (tp->nvram_jedecnum == JEDEC_ST) &&
3522 (nvram_cmd & NVRAM_CMD_FIRST)) {
3525 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3526 ret = tg3_nvram_exec_cmd(tp, cmd);
3530 if (!tg3_flag(tp, FLASH)) {
3531 /* We always do complete word writes to eeprom. */
3532 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3535 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3542 /* offset and length are dword aligned */
3543 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3547 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3548 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3549 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3553 if (!tg3_flag(tp, NVRAM)) {
3554 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3558 ret = tg3_nvram_lock(tp);
3562 tg3_enable_nvram_access(tp);
3563 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3564 tw32(NVRAM_WRITE1, 0x406);
3566 grc_mode = tr32(GRC_MODE);
3567 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3569 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3570 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3573 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3577 grc_mode = tr32(GRC_MODE);
3578 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3580 tg3_disable_nvram_access(tp);
3581 tg3_nvram_unlock(tp);
3584 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3585 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3592 #define RX_CPU_SCRATCH_BASE 0x30000
3593 #define RX_CPU_SCRATCH_SIZE 0x04000
3594 #define TX_CPU_SCRATCH_BASE 0x34000
3595 #define TX_CPU_SCRATCH_SIZE 0x04000
3597 /* tp->lock is held. */
3598 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3601 const int iters = 10000;
3603 for (i = 0; i < iters; i++) {
3604 tw32(cpu_base + CPU_STATE, 0xffffffff);
3605 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3606 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3608 if (pci_channel_offline(tp->pdev))
3612 return (i == iters) ? -EBUSY : 0;
3615 /* tp->lock is held. */
3616 static int tg3_rxcpu_pause(struct tg3 *tp)
3618 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3620 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3621 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3627 /* tp->lock is held. */
3628 static int tg3_txcpu_pause(struct tg3 *tp)
3630 return tg3_pause_cpu(tp, TX_CPU_BASE);
3633 /* tp->lock is held. */
3634 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3636 tw32(cpu_base + CPU_STATE, 0xffffffff);
3637 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3640 /* tp->lock is held. */
3641 static void tg3_rxcpu_resume(struct tg3 *tp)
3643 tg3_resume_cpu(tp, RX_CPU_BASE);
3646 /* tp->lock is held. */
3647 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3651 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3653 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3654 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3656 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3659 if (cpu_base == RX_CPU_BASE) {
3660 rc = tg3_rxcpu_pause(tp);
3663 * There is only an Rx CPU for the 5750 derivative in the
3666 if (tg3_flag(tp, IS_SSB_CORE))
3669 rc = tg3_txcpu_pause(tp);
3673 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3674 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3678 /* Clear firmware's nvram arbitration. */
3679 if (tg3_flag(tp, NVRAM))
3680 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3684 static int tg3_fw_data_len(struct tg3 *tp,
3685 const struct tg3_firmware_hdr *fw_hdr)
3689 /* Non fragmented firmware have one firmware header followed by a
3690 * contiguous chunk of data to be written. The length field in that
3691 * header is not the length of data to be written but the complete
3692 * length of the bss. The data length is determined based on
3693 * tp->fw->size minus headers.
3695 * Fragmented firmware have a main header followed by multiple
3696 * fragments. Each fragment is identical to non fragmented firmware
3697 * with a firmware header followed by a contiguous chunk of data. In
3698 * the main header, the length field is unused and set to 0xffffffff.
3699 * In each fragment header the length is the entire size of that
3700 * fragment i.e. fragment data + header length. Data length is
3701 * therefore length field in the header minus TG3_FW_HDR_LEN.
3703 if (tp->fw_len == 0xffffffff)
3704 fw_len = be32_to_cpu(fw_hdr->len);
3706 fw_len = tp->fw->size;
3708 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3711 /* tp->lock is held. */
3712 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3713 u32 cpu_scratch_base, int cpu_scratch_size,
3714 const struct tg3_firmware_hdr *fw_hdr)
3717 void (*write_op)(struct tg3 *, u32, u32);
3718 int total_len = tp->fw->size;
3720 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3722 "%s: Trying to load TX cpu firmware which is 5705\n",
3727 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3728 write_op = tg3_write_mem;
3730 write_op = tg3_write_indirect_reg32;
3732 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3733 /* It is possible that bootcode is still loading at this point.
3734 * Get the nvram lock first before halting the cpu.
3736 int lock_err = tg3_nvram_lock(tp);
3737 err = tg3_halt_cpu(tp, cpu_base);
3739 tg3_nvram_unlock(tp);
3743 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3744 write_op(tp, cpu_scratch_base + i, 0);
3745 tw32(cpu_base + CPU_STATE, 0xffffffff);
3746 tw32(cpu_base + CPU_MODE,
3747 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3749 /* Subtract additional main header for fragmented firmware and
3750 * advance to the first fragment
3752 total_len -= TG3_FW_HDR_LEN;
3757 u32 *fw_data = (u32 *)(fw_hdr + 1);
3758 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3759 write_op(tp, cpu_scratch_base +
3760 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3762 be32_to_cpu(fw_data[i]));
3764 total_len -= be32_to_cpu(fw_hdr->len);
3766 /* Advance to next fragment */
3767 fw_hdr = (struct tg3_firmware_hdr *)
3768 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3769 } while (total_len > 0);
3777 /* tp->lock is held. */
3778 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3781 const int iters = 5;
3783 tw32(cpu_base + CPU_STATE, 0xffffffff);
3784 tw32_f(cpu_base + CPU_PC, pc);
3786 for (i = 0; i < iters; i++) {
3787 if (tr32(cpu_base + CPU_PC) == pc)
3789 tw32(cpu_base + CPU_STATE, 0xffffffff);
3790 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3791 tw32_f(cpu_base + CPU_PC, pc);
3795 return (i == iters) ? -EBUSY : 0;
3798 /* tp->lock is held. */
3799 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3801 const struct tg3_firmware_hdr *fw_hdr;
3804 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3806 /* Firmware blob starts with version numbers, followed by
3807 start address and length. We are setting complete length.
3808 length = end_address_of_bss - start_address_of_text.
3809 Remainder is the blob to be loaded contiguously
3810 from start address. */
3812 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3813 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3818 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3819 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3824 /* Now startup only the RX cpu. */
3825 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3826 be32_to_cpu(fw_hdr->base_addr));
3828 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3829 "should be %08x\n", __func__,
3830 tr32(RX_CPU_BASE + CPU_PC),
3831 be32_to_cpu(fw_hdr->base_addr));
3835 tg3_rxcpu_resume(tp);
3840 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3842 const int iters = 1000;
3846 /* Wait for boot code to complete initialization and enter service
3847 * loop. It is then safe to download service patches
3849 for (i = 0; i < iters; i++) {
3850 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3857 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3861 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3863 netdev_warn(tp->dev,
3864 "Other patches exist. Not downloading EEE patch\n");
3871 /* tp->lock is held. */
3872 static void tg3_load_57766_firmware(struct tg3 *tp)
3874 struct tg3_firmware_hdr *fw_hdr;
3876 if (!tg3_flag(tp, NO_NVRAM))
3879 if (tg3_validate_rxcpu_state(tp))
3885 /* This firmware blob has a different format than older firmware
3886 * releases as given below. The main difference is we have fragmented
3887 * data to be written to non-contiguous locations.
3889 * In the beginning we have a firmware header identical to other
3890 * firmware which consists of version, base addr and length. The length
3891 * here is unused and set to 0xffffffff.
3893 * This is followed by a series of firmware fragments which are
3894 * individually identical to previous firmware. i.e. they have the
3895 * firmware header and followed by data for that fragment. The version
3896 * field of the individual fragment header is unused.
3899 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3900 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3903 if (tg3_rxcpu_pause(tp))
3906 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3907 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3909 tg3_rxcpu_resume(tp);
3912 /* tp->lock is held. */
3913 static int tg3_load_tso_firmware(struct tg3 *tp)
3915 const struct tg3_firmware_hdr *fw_hdr;
3916 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3919 if (!tg3_flag(tp, FW_TSO))
3922 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3924 /* Firmware blob starts with version numbers, followed by
3925 start address and length. We are setting complete length.
3926 length = end_address_of_bss - start_address_of_text.
3927 Remainder is the blob to be loaded contiguously
3928 from start address. */
3930 cpu_scratch_size = tp->fw_len;
3932 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3933 cpu_base = RX_CPU_BASE;
3934 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3936 cpu_base = TX_CPU_BASE;
3937 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3938 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3941 err = tg3_load_firmware_cpu(tp, cpu_base,
3942 cpu_scratch_base, cpu_scratch_size,
3947 /* Now startup the cpu. */
3948 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3949 be32_to_cpu(fw_hdr->base_addr));
3952 "%s fails to set CPU PC, is %08x should be %08x\n",
3953 __func__, tr32(cpu_base + CPU_PC),
3954 be32_to_cpu(fw_hdr->base_addr));
3958 tg3_resume_cpu(tp, cpu_base);
3962 /* tp->lock is held. */
3963 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3965 u32 addr_high, addr_low;
3967 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3968 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3969 (mac_addr[4] << 8) | mac_addr[5]);
3972 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3973 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3976 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3977 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3981 /* tp->lock is held. */
3982 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3987 for (i = 0; i < 4; i++) {
3988 if (i == 1 && skip_mac_1)
3990 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3993 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3994 tg3_asic_rev(tp) == ASIC_REV_5704) {
3995 for (i = 4; i < 16; i++)
3996 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3999 addr_high = (tp->dev->dev_addr[0] +
4000 tp->dev->dev_addr[1] +
4001 tp->dev->dev_addr[2] +
4002 tp->dev->dev_addr[3] +
4003 tp->dev->dev_addr[4] +
4004 tp->dev->dev_addr[5]) &
4005 TX_BACKOFF_SEED_MASK;
4006 tw32(MAC_TX_BACKOFF_SEED, addr_high);
4009 static void tg3_enable_register_access(struct tg3 *tp)
4012 * Make sure register accesses (indirect or otherwise) will function
4015 pci_write_config_dword(tp->pdev,
4016 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4019 static int tg3_power_up(struct tg3 *tp)
4023 tg3_enable_register_access(tp);
4025 err = pci_set_power_state(tp->pdev, PCI_D0);
4027 /* Switch out of Vaux if it is a NIC */
4028 tg3_pwrsrc_switch_to_vmain(tp);
4030 netdev_err(tp->dev, "Transition to D0 failed\n");
4036 static int tg3_setup_phy(struct tg3 *, bool);
4038 static int tg3_power_down_prepare(struct tg3 *tp)
4041 bool device_should_wake, do_low_power;
4043 tg3_enable_register_access(tp);
4045 /* Restore the CLKREQ setting. */
4046 if (tg3_flag(tp, CLKREQ_BUG))
4047 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4048 PCI_EXP_LNKCTL_CLKREQ_EN);
4050 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4051 tw32(TG3PCI_MISC_HOST_CTRL,
4052 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4054 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4055 tg3_flag(tp, WOL_ENABLE);
4057 if (tg3_flag(tp, USE_PHYLIB)) {
4058 do_low_power = false;
4059 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4060 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4061 struct phy_device *phydev;
4062 u32 phyid, advertising;
4064 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4066 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4068 tp->link_config.speed = phydev->speed;
4069 tp->link_config.duplex = phydev->duplex;
4070 tp->link_config.autoneg = phydev->autoneg;
4071 tp->link_config.advertising = phydev->advertising;
4073 advertising = ADVERTISED_TP |
4075 ADVERTISED_Autoneg |
4076 ADVERTISED_10baseT_Half;
4078 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4079 if (tg3_flag(tp, WOL_SPEED_100MB))
4081 ADVERTISED_100baseT_Half |
4082 ADVERTISED_100baseT_Full |
4083 ADVERTISED_10baseT_Full;
4085 advertising |= ADVERTISED_10baseT_Full;
4088 phydev->advertising = advertising;
4090 phy_start_aneg(phydev);
4092 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4093 if (phyid != PHY_ID_BCMAC131) {
4094 phyid &= PHY_BCM_OUI_MASK;
4095 if (phyid == PHY_BCM_OUI_1 ||
4096 phyid == PHY_BCM_OUI_2 ||
4097 phyid == PHY_BCM_OUI_3)
4098 do_low_power = true;
4102 do_low_power = true;
4104 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4105 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4107 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4108 tg3_setup_phy(tp, false);
4111 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4114 val = tr32(GRC_VCPU_EXT_CTRL);
4115 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4116 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4120 for (i = 0; i < 200; i++) {
4121 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4122 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4127 if (tg3_flag(tp, WOL_CAP))
4128 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4129 WOL_DRV_STATE_SHUTDOWN |
4133 if (device_should_wake) {
4136 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4138 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4139 tg3_phy_auxctl_write(tp,
4140 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4141 MII_TG3_AUXCTL_PCTL_WOL_EN |
4142 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4143 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4147 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4148 mac_mode = MAC_MODE_PORT_MODE_GMII;
4149 else if (tp->phy_flags &
4150 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4151 if (tp->link_config.active_speed == SPEED_1000)
4152 mac_mode = MAC_MODE_PORT_MODE_GMII;
4154 mac_mode = MAC_MODE_PORT_MODE_MII;
4156 mac_mode = MAC_MODE_PORT_MODE_MII;
4158 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4159 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4160 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4161 SPEED_100 : SPEED_10;
4162 if (tg3_5700_link_polarity(tp, speed))
4163 mac_mode |= MAC_MODE_LINK_POLARITY;
4165 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4168 mac_mode = MAC_MODE_PORT_MODE_TBI;
4171 if (!tg3_flag(tp, 5750_PLUS))
4172 tw32(MAC_LED_CTRL, tp->led_ctrl);
4174 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4175 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4176 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4177 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4179 if (tg3_flag(tp, ENABLE_APE))
4180 mac_mode |= MAC_MODE_APE_TX_EN |
4181 MAC_MODE_APE_RX_EN |
4182 MAC_MODE_TDE_ENABLE;
4184 tw32_f(MAC_MODE, mac_mode);
4187 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4191 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4192 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4193 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4196 base_val = tp->pci_clock_ctrl;
4197 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4198 CLOCK_CTRL_TXCLK_DISABLE);
4200 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4201 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4202 } else if (tg3_flag(tp, 5780_CLASS) ||
4203 tg3_flag(tp, CPMU_PRESENT) ||
4204 tg3_asic_rev(tp) == ASIC_REV_5906) {
4206 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4207 u32 newbits1, newbits2;
4209 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4210 tg3_asic_rev(tp) == ASIC_REV_5701) {
4211 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4212 CLOCK_CTRL_TXCLK_DISABLE |
4214 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4215 } else if (tg3_flag(tp, 5705_PLUS)) {
4216 newbits1 = CLOCK_CTRL_625_CORE;
4217 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4219 newbits1 = CLOCK_CTRL_ALTCLK;
4220 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4223 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4226 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4229 if (!tg3_flag(tp, 5705_PLUS)) {
4232 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4233 tg3_asic_rev(tp) == ASIC_REV_5701) {
4234 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4235 CLOCK_CTRL_TXCLK_DISABLE |
4236 CLOCK_CTRL_44MHZ_CORE);
4238 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4241 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4242 tp->pci_clock_ctrl | newbits3, 40);
4246 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4247 tg3_power_down_phy(tp, do_low_power);
4249 tg3_frob_aux_power(tp, true);
4251 /* Workaround for unstable PLL clock */
4252 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4253 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4254 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4255 u32 val = tr32(0x7d00);
4257 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4259 if (!tg3_flag(tp, ENABLE_ASF)) {
4262 err = tg3_nvram_lock(tp);
4263 tg3_halt_cpu(tp, RX_CPU_BASE);
4265 tg3_nvram_unlock(tp);
4269 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4271 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4276 static void tg3_power_down(struct tg3 *tp)
4278 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4279 pci_set_power_state(tp->pdev, PCI_D3hot);
4282 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4284 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4285 case MII_TG3_AUX_STAT_10HALF:
4287 *duplex = DUPLEX_HALF;
4290 case MII_TG3_AUX_STAT_10FULL:
4292 *duplex = DUPLEX_FULL;
4295 case MII_TG3_AUX_STAT_100HALF:
4297 *duplex = DUPLEX_HALF;
4300 case MII_TG3_AUX_STAT_100FULL:
4302 *duplex = DUPLEX_FULL;
4305 case MII_TG3_AUX_STAT_1000HALF:
4306 *speed = SPEED_1000;
4307 *duplex = DUPLEX_HALF;
4310 case MII_TG3_AUX_STAT_1000FULL:
4311 *speed = SPEED_1000;
4312 *duplex = DUPLEX_FULL;
4316 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4317 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4319 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4323 *speed = SPEED_UNKNOWN;
4324 *duplex = DUPLEX_UNKNOWN;
4329 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4334 new_adv = ADVERTISE_CSMA;
4335 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4336 new_adv |= mii_advertise_flowctrl(flowctrl);
4338 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4342 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4343 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4345 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4346 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4347 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4349 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4354 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4357 tw32(TG3_CPMU_EEE_MODE,
4358 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4360 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4365 /* Advertise 100-BaseTX EEE ability */
4366 if (advertise & ADVERTISED_100baseT_Full)
4367 val |= MDIO_AN_EEE_ADV_100TX;
4368 /* Advertise 1000-BaseT EEE ability */
4369 if (advertise & ADVERTISED_1000baseT_Full)
4370 val |= MDIO_AN_EEE_ADV_1000T;
4372 if (!tp->eee.eee_enabled) {
4374 tp->eee.advertised = 0;
4376 tp->eee.advertised = advertise &
4377 (ADVERTISED_100baseT_Full |
4378 ADVERTISED_1000baseT_Full);
4381 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4385 switch (tg3_asic_rev(tp)) {
4387 case ASIC_REV_57765:
4388 case ASIC_REV_57766:
4390 /* If we advertised any eee advertisements above... */
4392 val = MII_TG3_DSP_TAP26_ALNOKO |
4393 MII_TG3_DSP_TAP26_RMRXSTO |
4394 MII_TG3_DSP_TAP26_OPCSINPT;
4395 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4399 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4400 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4401 MII_TG3_DSP_CH34TP2_HIBW01);
4404 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4413 static void tg3_phy_copper_begin(struct tg3 *tp)
4415 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4416 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4419 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4420 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4421 adv = ADVERTISED_10baseT_Half |
4422 ADVERTISED_10baseT_Full;
4423 if (tg3_flag(tp, WOL_SPEED_100MB))
4424 adv |= ADVERTISED_100baseT_Half |
4425 ADVERTISED_100baseT_Full;
4426 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4427 if (!(tp->phy_flags &
4428 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4429 adv |= ADVERTISED_1000baseT_Half;
4430 adv |= ADVERTISED_1000baseT_Full;
4433 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4435 adv = tp->link_config.advertising;
4436 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4437 adv &= ~(ADVERTISED_1000baseT_Half |
4438 ADVERTISED_1000baseT_Full);
4440 fc = tp->link_config.flowctrl;
4443 tg3_phy_autoneg_cfg(tp, adv, fc);
4445 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4446 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4447 /* Normally during power down we want to autonegotiate
4448 * the lowest possible speed for WOL. However, to avoid
4449 * link flap, we leave it untouched.
4454 tg3_writephy(tp, MII_BMCR,
4455 BMCR_ANENABLE | BMCR_ANRESTART);
4458 u32 bmcr, orig_bmcr;
4460 tp->link_config.active_speed = tp->link_config.speed;
4461 tp->link_config.active_duplex = tp->link_config.duplex;
4463 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4464 /* With autoneg disabled, 5715 only links up when the
4465 * advertisement register has the configured speed
4468 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4472 switch (tp->link_config.speed) {
4478 bmcr |= BMCR_SPEED100;
4482 bmcr |= BMCR_SPEED1000;
4486 if (tp->link_config.duplex == DUPLEX_FULL)
4487 bmcr |= BMCR_FULLDPLX;
4489 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4490 (bmcr != orig_bmcr)) {
4491 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4492 for (i = 0; i < 1500; i++) {
4496 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4497 tg3_readphy(tp, MII_BMSR, &tmp))
4499 if (!(tmp & BMSR_LSTATUS)) {
4504 tg3_writephy(tp, MII_BMCR, bmcr);
4510 static int tg3_phy_pull_config(struct tg3 *tp)
4515 err = tg3_readphy(tp, MII_BMCR, &val);
4519 if (!(val & BMCR_ANENABLE)) {
4520 tp->link_config.autoneg = AUTONEG_DISABLE;
4521 tp->link_config.advertising = 0;
4522 tg3_flag_clear(tp, PAUSE_AUTONEG);
4526 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4528 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4531 tp->link_config.speed = SPEED_10;
4534 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4537 tp->link_config.speed = SPEED_100;
4539 case BMCR_SPEED1000:
4540 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4541 tp->link_config.speed = SPEED_1000;
4549 if (val & BMCR_FULLDPLX)
4550 tp->link_config.duplex = DUPLEX_FULL;
4552 tp->link_config.duplex = DUPLEX_HALF;
4554 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4560 tp->link_config.autoneg = AUTONEG_ENABLE;
4561 tp->link_config.advertising = ADVERTISED_Autoneg;
4562 tg3_flag_set(tp, PAUSE_AUTONEG);
4564 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4567 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4571 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4572 tp->link_config.advertising |= adv | ADVERTISED_TP;
4574 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4576 tp->link_config.advertising |= ADVERTISED_FIBRE;
4579 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4582 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4583 err = tg3_readphy(tp, MII_CTRL1000, &val);
4587 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4589 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4593 adv = tg3_decode_flowctrl_1000X(val);
4594 tp->link_config.flowctrl = adv;
4596 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4597 adv = mii_adv_to_ethtool_adv_x(val);
4600 tp->link_config.advertising |= adv;
4607 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4611 /* Turn off tap power management. */
4612 /* Set Extended packet length bit */
4613 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4615 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4616 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4617 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4618 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4619 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4626 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4628 struct ethtool_eee eee;
4630 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4633 tg3_eee_pull_config(tp, &eee);
4635 if (tp->eee.eee_enabled) {
4636 if (tp->eee.advertised != eee.advertised ||
4637 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4638 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4641 /* EEE is disabled but we're advertising */
4649 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4651 u32 advmsk, tgtadv, advertising;
4653 advertising = tp->link_config.advertising;
4654 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4656 advmsk = ADVERTISE_ALL;
4657 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4658 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4659 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4662 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4665 if ((*lcladv & advmsk) != tgtadv)
4668 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4671 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4673 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4677 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4678 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4679 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4680 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4681 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4683 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4686 if (tg3_ctrl != tgtadv)
4693 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4697 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4700 if (tg3_readphy(tp, MII_STAT1000, &val))
4703 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4706 if (tg3_readphy(tp, MII_LPA, rmtadv))
4709 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4710 tp->link_config.rmt_adv = lpeth;
4715 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4717 if (curr_link_up != tp->link_up) {
4719 netif_carrier_on(tp->dev);
4721 netif_carrier_off(tp->dev);
4722 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4723 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4726 tg3_link_report(tp);
4733 static void tg3_clear_mac_status(struct tg3 *tp)
4738 MAC_STATUS_SYNC_CHANGED |
4739 MAC_STATUS_CFG_CHANGED |
4740 MAC_STATUS_MI_COMPLETION |
4741 MAC_STATUS_LNKSTATE_CHANGED);
4745 static void tg3_setup_eee(struct tg3 *tp)
4749 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4750 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4751 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4752 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4754 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4756 tw32_f(TG3_CPMU_EEE_CTRL,
4757 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4759 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4760 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4761 TG3_CPMU_EEEMD_LPI_IN_RX |
4762 TG3_CPMU_EEEMD_EEE_ENABLE;
4764 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4765 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4767 if (tg3_flag(tp, ENABLE_APE))
4768 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4770 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4772 tw32_f(TG3_CPMU_EEE_DBTMR1,
4773 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4774 (tp->eee.tx_lpi_timer & 0xffff));
4776 tw32_f(TG3_CPMU_EEE_DBTMR2,
4777 TG3_CPMU_DBTMR2_APE_TX_2047US |
4778 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4781 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4783 bool current_link_up;
4785 u32 lcl_adv, rmt_adv;
4790 tg3_clear_mac_status(tp);
4792 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4794 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4798 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4800 /* Some third-party PHYs need to be reset on link going
4803 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4804 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4805 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4807 tg3_readphy(tp, MII_BMSR, &bmsr);
4808 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4809 !(bmsr & BMSR_LSTATUS))
4815 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4816 tg3_readphy(tp, MII_BMSR, &bmsr);
4817 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4818 !tg3_flag(tp, INIT_COMPLETE))
4821 if (!(bmsr & BMSR_LSTATUS)) {
4822 err = tg3_init_5401phy_dsp(tp);
4826 tg3_readphy(tp, MII_BMSR, &bmsr);
4827 for (i = 0; i < 1000; i++) {
4829 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4830 (bmsr & BMSR_LSTATUS)) {
4836 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4837 TG3_PHY_REV_BCM5401_B0 &&
4838 !(bmsr & BMSR_LSTATUS) &&
4839 tp->link_config.active_speed == SPEED_1000) {
4840 err = tg3_phy_reset(tp);
4842 err = tg3_init_5401phy_dsp(tp);
4847 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4848 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4849 /* 5701 {A0,B0} CRC bug workaround */
4850 tg3_writephy(tp, 0x15, 0x0a75);
4851 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4852 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4853 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4856 /* Clear pending interrupts... */
4857 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4858 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4860 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4861 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4862 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4863 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4865 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4866 tg3_asic_rev(tp) == ASIC_REV_5701) {
4867 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4868 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4869 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4871 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4874 current_link_up = false;
4875 current_speed = SPEED_UNKNOWN;
4876 current_duplex = DUPLEX_UNKNOWN;
4877 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4878 tp->link_config.rmt_adv = 0;
4880 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4881 err = tg3_phy_auxctl_read(tp,
4882 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4884 if (!err && !(val & (1 << 10))) {
4885 tg3_phy_auxctl_write(tp,
4886 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4893 for (i = 0; i < 100; i++) {
4894 tg3_readphy(tp, MII_BMSR, &bmsr);
4895 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4896 (bmsr & BMSR_LSTATUS))
4901 if (bmsr & BMSR_LSTATUS) {
4904 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4905 for (i = 0; i < 2000; i++) {
4907 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4912 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4917 for (i = 0; i < 200; i++) {
4918 tg3_readphy(tp, MII_BMCR, &bmcr);
4919 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4921 if (bmcr && bmcr != 0x7fff)
4929 tp->link_config.active_speed = current_speed;
4930 tp->link_config.active_duplex = current_duplex;
4932 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4933 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4935 if ((bmcr & BMCR_ANENABLE) &&
4937 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4938 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4939 current_link_up = true;
4941 /* EEE settings changes take effect only after a phy
4942 * reset. If we have skipped a reset due to Link Flap
4943 * Avoidance being enabled, do it now.
4945 if (!eee_config_ok &&
4946 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4952 if (!(bmcr & BMCR_ANENABLE) &&
4953 tp->link_config.speed == current_speed &&
4954 tp->link_config.duplex == current_duplex) {
4955 current_link_up = true;
4959 if (current_link_up &&
4960 tp->link_config.active_duplex == DUPLEX_FULL) {
4963 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4964 reg = MII_TG3_FET_GEN_STAT;
4965 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4967 reg = MII_TG3_EXT_STAT;
4968 bit = MII_TG3_EXT_STAT_MDIX;
4971 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4972 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4974 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4979 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4980 tg3_phy_copper_begin(tp);
4982 if (tg3_flag(tp, ROBOSWITCH)) {
4983 current_link_up = true;
4984 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4985 current_speed = SPEED_1000;
4986 current_duplex = DUPLEX_FULL;
4987 tp->link_config.active_speed = current_speed;
4988 tp->link_config.active_duplex = current_duplex;
4991 tg3_readphy(tp, MII_BMSR, &bmsr);
4992 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4993 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4994 current_link_up = true;
4997 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4998 if (current_link_up) {
4999 if (tp->link_config.active_speed == SPEED_100 ||
5000 tp->link_config.active_speed == SPEED_10)
5001 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5003 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5004 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
5005 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5007 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5009 /* In order for the 5750 core in BCM4785 chip to work properly
5010 * in RGMII mode, the Led Control Register must be set up.
5012 if (tg3_flag(tp, RGMII_MODE)) {
5013 u32 led_ctrl = tr32(MAC_LED_CTRL);
5014 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5016 if (tp->link_config.active_speed == SPEED_10)
5017 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5018 else if (tp->link_config.active_speed == SPEED_100)
5019 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5020 LED_CTRL_100MBPS_ON);
5021 else if (tp->link_config.active_speed == SPEED_1000)
5022 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5023 LED_CTRL_1000MBPS_ON);
5025 tw32(MAC_LED_CTRL, led_ctrl);
5029 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5030 if (tp->link_config.active_duplex == DUPLEX_HALF)
5031 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5033 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5034 if (current_link_up &&
5035 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5036 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5038 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5041 /* ??? Without this setting Netgear GA302T PHY does not
5042 * ??? send/receive packets...
5044 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5045 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5046 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5047 tw32_f(MAC_MI_MODE, tp->mi_mode);
5051 tw32_f(MAC_MODE, tp->mac_mode);
5054 tg3_phy_eee_adjust(tp, current_link_up);
5056 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5057 /* Polled via timer. */
5058 tw32_f(MAC_EVENT, 0);
5060 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5064 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5066 tp->link_config.active_speed == SPEED_1000 &&
5067 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5070 (MAC_STATUS_SYNC_CHANGED |
5071 MAC_STATUS_CFG_CHANGED));
5074 NIC_SRAM_FIRMWARE_MBOX,
5075 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5078 /* Prevent send BD corruption. */
5079 if (tg3_flag(tp, CLKREQ_BUG)) {
5080 if (tp->link_config.active_speed == SPEED_100 ||
5081 tp->link_config.active_speed == SPEED_10)
5082 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5083 PCI_EXP_LNKCTL_CLKREQ_EN);
5085 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5086 PCI_EXP_LNKCTL_CLKREQ_EN);
5089 tg3_test_and_report_link_chg(tp, current_link_up);
5094 struct tg3_fiber_aneginfo {
5096 #define ANEG_STATE_UNKNOWN 0
5097 #define ANEG_STATE_AN_ENABLE 1
5098 #define ANEG_STATE_RESTART_INIT 2
5099 #define ANEG_STATE_RESTART 3
5100 #define ANEG_STATE_DISABLE_LINK_OK 4
5101 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5102 #define ANEG_STATE_ABILITY_DETECT 6
5103 #define ANEG_STATE_ACK_DETECT_INIT 7
5104 #define ANEG_STATE_ACK_DETECT 8
5105 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5106 #define ANEG_STATE_COMPLETE_ACK 10
5107 #define ANEG_STATE_IDLE_DETECT_INIT 11
5108 #define ANEG_STATE_IDLE_DETECT 12
5109 #define ANEG_STATE_LINK_OK 13
5110 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5111 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5114 #define MR_AN_ENABLE 0x00000001
5115 #define MR_RESTART_AN 0x00000002
5116 #define MR_AN_COMPLETE 0x00000004
5117 #define MR_PAGE_RX 0x00000008
5118 #define MR_NP_LOADED 0x00000010
5119 #define MR_TOGGLE_TX 0x00000020
5120 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5121 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5122 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5123 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5124 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5125 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5126 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5127 #define MR_TOGGLE_RX 0x00002000
5128 #define MR_NP_RX 0x00004000
5130 #define MR_LINK_OK 0x80000000
5132 unsigned long link_time, cur_time;
5134 u32 ability_match_cfg;
5135 int ability_match_count;
5137 char ability_match, idle_match, ack_match;
5139 u32 txconfig, rxconfig;
5140 #define ANEG_CFG_NP 0x00000080
5141 #define ANEG_CFG_ACK 0x00000040
5142 #define ANEG_CFG_RF2 0x00000020
5143 #define ANEG_CFG_RF1 0x00000010
5144 #define ANEG_CFG_PS2 0x00000001
5145 #define ANEG_CFG_PS1 0x00008000
5146 #define ANEG_CFG_HD 0x00004000
5147 #define ANEG_CFG_FD 0x00002000
5148 #define ANEG_CFG_INVAL 0x00001f06
5153 #define ANEG_TIMER_ENAB 2
5154 #define ANEG_FAILED -1
5156 #define ANEG_STATE_SETTLE_TIME 10000
5158 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5159 struct tg3_fiber_aneginfo *ap)
5162 unsigned long delta;
5166 if (ap->state == ANEG_STATE_UNKNOWN) {
5170 ap->ability_match_cfg = 0;
5171 ap->ability_match_count = 0;
5172 ap->ability_match = 0;
5178 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5179 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5181 if (rx_cfg_reg != ap->ability_match_cfg) {
5182 ap->ability_match_cfg = rx_cfg_reg;
5183 ap->ability_match = 0;
5184 ap->ability_match_count = 0;
5186 if (++ap->ability_match_count > 1) {
5187 ap->ability_match = 1;
5188 ap->ability_match_cfg = rx_cfg_reg;
5191 if (rx_cfg_reg & ANEG_CFG_ACK)
5199 ap->ability_match_cfg = 0;
5200 ap->ability_match_count = 0;
5201 ap->ability_match = 0;
5207 ap->rxconfig = rx_cfg_reg;
5210 switch (ap->state) {
5211 case ANEG_STATE_UNKNOWN:
5212 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5213 ap->state = ANEG_STATE_AN_ENABLE;
5216 case ANEG_STATE_AN_ENABLE:
5217 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5218 if (ap->flags & MR_AN_ENABLE) {
5221 ap->ability_match_cfg = 0;
5222 ap->ability_match_count = 0;
5223 ap->ability_match = 0;
5227 ap->state = ANEG_STATE_RESTART_INIT;
5229 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5233 case ANEG_STATE_RESTART_INIT:
5234 ap->link_time = ap->cur_time;
5235 ap->flags &= ~(MR_NP_LOADED);
5237 tw32(MAC_TX_AUTO_NEG, 0);
5238 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5239 tw32_f(MAC_MODE, tp->mac_mode);
5242 ret = ANEG_TIMER_ENAB;
5243 ap->state = ANEG_STATE_RESTART;
5246 case ANEG_STATE_RESTART:
5247 delta = ap->cur_time - ap->link_time;
5248 if (delta > ANEG_STATE_SETTLE_TIME)
5249 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5251 ret = ANEG_TIMER_ENAB;
5254 case ANEG_STATE_DISABLE_LINK_OK:
5258 case ANEG_STATE_ABILITY_DETECT_INIT:
5259 ap->flags &= ~(MR_TOGGLE_TX);
5260 ap->txconfig = ANEG_CFG_FD;
5261 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5262 if (flowctrl & ADVERTISE_1000XPAUSE)
5263 ap->txconfig |= ANEG_CFG_PS1;
5264 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5265 ap->txconfig |= ANEG_CFG_PS2;
5266 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5267 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5268 tw32_f(MAC_MODE, tp->mac_mode);
5271 ap->state = ANEG_STATE_ABILITY_DETECT;
5274 case ANEG_STATE_ABILITY_DETECT:
5275 if (ap->ability_match != 0 && ap->rxconfig != 0)
5276 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5279 case ANEG_STATE_ACK_DETECT_INIT:
5280 ap->txconfig |= ANEG_CFG_ACK;
5281 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5282 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5283 tw32_f(MAC_MODE, tp->mac_mode);
5286 ap->state = ANEG_STATE_ACK_DETECT;
5289 case ANEG_STATE_ACK_DETECT:
5290 if (ap->ack_match != 0) {
5291 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5292 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5293 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5295 ap->state = ANEG_STATE_AN_ENABLE;
5297 } else if (ap->ability_match != 0 &&
5298 ap->rxconfig == 0) {
5299 ap->state = ANEG_STATE_AN_ENABLE;
5303 case ANEG_STATE_COMPLETE_ACK_INIT:
5304 if (ap->rxconfig & ANEG_CFG_INVAL) {
5308 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5309 MR_LP_ADV_HALF_DUPLEX |
5310 MR_LP_ADV_SYM_PAUSE |
5311 MR_LP_ADV_ASYM_PAUSE |
5312 MR_LP_ADV_REMOTE_FAULT1 |
5313 MR_LP_ADV_REMOTE_FAULT2 |
5314 MR_LP_ADV_NEXT_PAGE |
5317 if (ap->rxconfig & ANEG_CFG_FD)
5318 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5319 if (ap->rxconfig & ANEG_CFG_HD)
5320 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5321 if (ap->rxconfig & ANEG_CFG_PS1)
5322 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5323 if (ap->rxconfig & ANEG_CFG_PS2)
5324 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5325 if (ap->rxconfig & ANEG_CFG_RF1)
5326 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5327 if (ap->rxconfig & ANEG_CFG_RF2)
5328 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5329 if (ap->rxconfig & ANEG_CFG_NP)
5330 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5332 ap->link_time = ap->cur_time;
5334 ap->flags ^= (MR_TOGGLE_TX);
5335 if (ap->rxconfig & 0x0008)
5336 ap->flags |= MR_TOGGLE_RX;
5337 if (ap->rxconfig & ANEG_CFG_NP)
5338 ap->flags |= MR_NP_RX;
5339 ap->flags |= MR_PAGE_RX;
5341 ap->state = ANEG_STATE_COMPLETE_ACK;
5342 ret = ANEG_TIMER_ENAB;
5345 case ANEG_STATE_COMPLETE_ACK:
5346 if (ap->ability_match != 0 &&
5347 ap->rxconfig == 0) {
5348 ap->state = ANEG_STATE_AN_ENABLE;
5351 delta = ap->cur_time - ap->link_time;
5352 if (delta > ANEG_STATE_SETTLE_TIME) {
5353 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5354 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5356 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5357 !(ap->flags & MR_NP_RX)) {
5358 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5366 case ANEG_STATE_IDLE_DETECT_INIT:
5367 ap->link_time = ap->cur_time;
5368 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5369 tw32_f(MAC_MODE, tp->mac_mode);
5372 ap->state = ANEG_STATE_IDLE_DETECT;
5373 ret = ANEG_TIMER_ENAB;
5376 case ANEG_STATE_IDLE_DETECT:
5377 if (ap->ability_match != 0 &&
5378 ap->rxconfig == 0) {
5379 ap->state = ANEG_STATE_AN_ENABLE;
5382 delta = ap->cur_time - ap->link_time;
5383 if (delta > ANEG_STATE_SETTLE_TIME) {
5384 /* XXX another gem from the Broadcom driver :( */
5385 ap->state = ANEG_STATE_LINK_OK;
5389 case ANEG_STATE_LINK_OK:
5390 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5394 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5395 /* ??? unimplemented */
5398 case ANEG_STATE_NEXT_PAGE_WAIT:
5399 /* ??? unimplemented */
5410 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5413 struct tg3_fiber_aneginfo aninfo;
5414 int status = ANEG_FAILED;
5418 tw32_f(MAC_TX_AUTO_NEG, 0);
5420 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5421 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5424 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5427 memset(&aninfo, 0, sizeof(aninfo));
5428 aninfo.flags |= MR_AN_ENABLE;
5429 aninfo.state = ANEG_STATE_UNKNOWN;
5430 aninfo.cur_time = 0;
5432 while (++tick < 195000) {
5433 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5434 if (status == ANEG_DONE || status == ANEG_FAILED)
5440 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5441 tw32_f(MAC_MODE, tp->mac_mode);
5444 *txflags = aninfo.txconfig;
5445 *rxflags = aninfo.flags;
5447 if (status == ANEG_DONE &&
5448 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5449 MR_LP_ADV_FULL_DUPLEX)))
5455 static void tg3_init_bcm8002(struct tg3 *tp)
5457 u32 mac_status = tr32(MAC_STATUS);
5460 /* Reset when initting first time or we have a link. */
5461 if (tg3_flag(tp, INIT_COMPLETE) &&
5462 !(mac_status & MAC_STATUS_PCS_SYNCED))
5465 /* Set PLL lock range. */
5466 tg3_writephy(tp, 0x16, 0x8007);
5469 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5471 /* Wait for reset to complete. */
5472 /* XXX schedule_timeout() ... */
5473 for (i = 0; i < 500; i++)
5476 /* Config mode; select PMA/Ch 1 regs. */
5477 tg3_writephy(tp, 0x10, 0x8411);
5479 /* Enable auto-lock and comdet, select txclk for tx. */
5480 tg3_writephy(tp, 0x11, 0x0a10);
5482 tg3_writephy(tp, 0x18, 0x00a0);
5483 tg3_writephy(tp, 0x16, 0x41ff);
5485 /* Assert and deassert POR. */
5486 tg3_writephy(tp, 0x13, 0x0400);
5488 tg3_writephy(tp, 0x13, 0x0000);
5490 tg3_writephy(tp, 0x11, 0x0a50);
5492 tg3_writephy(tp, 0x11, 0x0a10);
5494 /* Wait for signal to stabilize */
5495 /* XXX schedule_timeout() ... */
5496 for (i = 0; i < 15000; i++)
5499 /* Deselect the channel register so we can read the PHYID
5502 tg3_writephy(tp, 0x10, 0x8011);
5505 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5508 bool current_link_up;
5509 u32 sg_dig_ctrl, sg_dig_status;
5510 u32 serdes_cfg, expected_sg_dig_ctrl;
5511 int workaround, port_a;
5514 expected_sg_dig_ctrl = 0;
5517 current_link_up = false;
5519 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5520 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5522 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5525 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5526 /* preserve bits 20-23 for voltage regulator */
5527 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5530 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5532 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5533 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5535 u32 val = serdes_cfg;
5541 tw32_f(MAC_SERDES_CFG, val);
5544 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5546 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5547 tg3_setup_flow_control(tp, 0, 0);
5548 current_link_up = true;
5553 /* Want auto-negotiation. */
5554 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5556 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5557 if (flowctrl & ADVERTISE_1000XPAUSE)
5558 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5559 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5560 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5562 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5563 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5564 tp->serdes_counter &&
5565 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5566 MAC_STATUS_RCVD_CFG)) ==
5567 MAC_STATUS_PCS_SYNCED)) {
5568 tp->serdes_counter--;
5569 current_link_up = true;
5574 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5575 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5577 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5579 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5580 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5581 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5582 MAC_STATUS_SIGNAL_DET)) {
5583 sg_dig_status = tr32(SG_DIG_STATUS);
5584 mac_status = tr32(MAC_STATUS);
5586 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5587 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5588 u32 local_adv = 0, remote_adv = 0;
5590 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5591 local_adv |= ADVERTISE_1000XPAUSE;
5592 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5593 local_adv |= ADVERTISE_1000XPSE_ASYM;
5595 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5596 remote_adv |= LPA_1000XPAUSE;
5597 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5598 remote_adv |= LPA_1000XPAUSE_ASYM;
5600 tp->link_config.rmt_adv =
5601 mii_adv_to_ethtool_adv_x(remote_adv);
5603 tg3_setup_flow_control(tp, local_adv, remote_adv);
5604 current_link_up = true;
5605 tp->serdes_counter = 0;
5606 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5607 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5608 if (tp->serdes_counter)
5609 tp->serdes_counter--;
5612 u32 val = serdes_cfg;
5619 tw32_f(MAC_SERDES_CFG, val);
5622 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5625 /* Link parallel detection - link is up */
5626 /* only if we have PCS_SYNC and not */
5627 /* receiving config code words */
5628 mac_status = tr32(MAC_STATUS);
5629 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5630 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5631 tg3_setup_flow_control(tp, 0, 0);
5632 current_link_up = true;
5634 TG3_PHYFLG_PARALLEL_DETECT;
5635 tp->serdes_counter =
5636 SERDES_PARALLEL_DET_TIMEOUT;
5638 goto restart_autoneg;
5642 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5643 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5647 return current_link_up;
5650 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5652 bool current_link_up = false;
5654 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5657 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5658 u32 txflags, rxflags;
5661 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5662 u32 local_adv = 0, remote_adv = 0;
5664 if (txflags & ANEG_CFG_PS1)
5665 local_adv |= ADVERTISE_1000XPAUSE;
5666 if (txflags & ANEG_CFG_PS2)
5667 local_adv |= ADVERTISE_1000XPSE_ASYM;
5669 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5670 remote_adv |= LPA_1000XPAUSE;
5671 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5672 remote_adv |= LPA_1000XPAUSE_ASYM;
5674 tp->link_config.rmt_adv =
5675 mii_adv_to_ethtool_adv_x(remote_adv);
5677 tg3_setup_flow_control(tp, local_adv, remote_adv);
5679 current_link_up = true;
5681 for (i = 0; i < 30; i++) {
5684 (MAC_STATUS_SYNC_CHANGED |
5685 MAC_STATUS_CFG_CHANGED));
5687 if ((tr32(MAC_STATUS) &
5688 (MAC_STATUS_SYNC_CHANGED |
5689 MAC_STATUS_CFG_CHANGED)) == 0)
5693 mac_status = tr32(MAC_STATUS);
5694 if (!current_link_up &&
5695 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5696 !(mac_status & MAC_STATUS_RCVD_CFG))
5697 current_link_up = true;
5699 tg3_setup_flow_control(tp, 0, 0);
5701 /* Forcing 1000FD link up. */
5702 current_link_up = true;
5704 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5707 tw32_f(MAC_MODE, tp->mac_mode);
5712 return current_link_up;
5715 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5718 u16 orig_active_speed;
5719 u8 orig_active_duplex;
5721 bool current_link_up;
5724 orig_pause_cfg = tp->link_config.active_flowctrl;
5725 orig_active_speed = tp->link_config.active_speed;
5726 orig_active_duplex = tp->link_config.active_duplex;
5728 if (!tg3_flag(tp, HW_AUTONEG) &&
5730 tg3_flag(tp, INIT_COMPLETE)) {
5731 mac_status = tr32(MAC_STATUS);
5732 mac_status &= (MAC_STATUS_PCS_SYNCED |
5733 MAC_STATUS_SIGNAL_DET |
5734 MAC_STATUS_CFG_CHANGED |
5735 MAC_STATUS_RCVD_CFG);
5736 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5737 MAC_STATUS_SIGNAL_DET)) {
5738 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5739 MAC_STATUS_CFG_CHANGED));
5744 tw32_f(MAC_TX_AUTO_NEG, 0);
5746 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5747 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5748 tw32_f(MAC_MODE, tp->mac_mode);
5751 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5752 tg3_init_bcm8002(tp);
5754 /* Enable link change event even when serdes polling. */
5755 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5758 current_link_up = false;
5759 tp->link_config.rmt_adv = 0;
5760 mac_status = tr32(MAC_STATUS);
5762 if (tg3_flag(tp, HW_AUTONEG))
5763 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5765 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5767 tp->napi[0].hw_status->status =
5768 (SD_STATUS_UPDATED |
5769 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5771 for (i = 0; i < 100; i++) {
5772 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5773 MAC_STATUS_CFG_CHANGED));
5775 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5776 MAC_STATUS_CFG_CHANGED |
5777 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5781 mac_status = tr32(MAC_STATUS);
5782 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5783 current_link_up = false;
5784 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5785 tp->serdes_counter == 0) {
5786 tw32_f(MAC_MODE, (tp->mac_mode |
5787 MAC_MODE_SEND_CONFIGS));
5789 tw32_f(MAC_MODE, tp->mac_mode);
5793 if (current_link_up) {
5794 tp->link_config.active_speed = SPEED_1000;
5795 tp->link_config.active_duplex = DUPLEX_FULL;
5796 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5797 LED_CTRL_LNKLED_OVERRIDE |
5798 LED_CTRL_1000MBPS_ON));
5800 tp->link_config.active_speed = SPEED_UNKNOWN;
5801 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5802 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5803 LED_CTRL_LNKLED_OVERRIDE |
5804 LED_CTRL_TRAFFIC_OVERRIDE));
5807 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5808 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5809 if (orig_pause_cfg != now_pause_cfg ||
5810 orig_active_speed != tp->link_config.active_speed ||
5811 orig_active_duplex != tp->link_config.active_duplex)
5812 tg3_link_report(tp);
5818 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5822 u16 current_speed = SPEED_UNKNOWN;
5823 u8 current_duplex = DUPLEX_UNKNOWN;
5824 bool current_link_up = false;
5825 u32 local_adv, remote_adv, sgsr;
5827 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5828 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5829 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5830 (sgsr & SERDES_TG3_SGMII_MODE)) {
5835 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5837 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5838 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5840 current_link_up = true;
5841 if (sgsr & SERDES_TG3_SPEED_1000) {
5842 current_speed = SPEED_1000;
5843 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5844 } else if (sgsr & SERDES_TG3_SPEED_100) {
5845 current_speed = SPEED_100;
5846 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5848 current_speed = SPEED_10;
5849 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5852 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5853 current_duplex = DUPLEX_FULL;
5855 current_duplex = DUPLEX_HALF;
5858 tw32_f(MAC_MODE, tp->mac_mode);
5861 tg3_clear_mac_status(tp);
5863 goto fiber_setup_done;
5866 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5867 tw32_f(MAC_MODE, tp->mac_mode);
5870 tg3_clear_mac_status(tp);
5875 tp->link_config.rmt_adv = 0;
5877 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5878 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5879 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5880 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5881 bmsr |= BMSR_LSTATUS;
5883 bmsr &= ~BMSR_LSTATUS;
5886 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5888 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5889 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5890 /* do nothing, just check for link up at the end */
5891 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5894 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5895 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5896 ADVERTISE_1000XPAUSE |
5897 ADVERTISE_1000XPSE_ASYM |
5900 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5901 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5903 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5904 tg3_writephy(tp, MII_ADVERTISE, newadv);
5905 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5906 tg3_writephy(tp, MII_BMCR, bmcr);
5908 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5909 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5910 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5917 bmcr &= ~BMCR_SPEED1000;
5918 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5920 if (tp->link_config.duplex == DUPLEX_FULL)
5921 new_bmcr |= BMCR_FULLDPLX;
5923 if (new_bmcr != bmcr) {
5924 /* BMCR_SPEED1000 is a reserved bit that needs
5925 * to be set on write.
5927 new_bmcr |= BMCR_SPEED1000;
5929 /* Force a linkdown */
5933 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5934 adv &= ~(ADVERTISE_1000XFULL |
5935 ADVERTISE_1000XHALF |
5937 tg3_writephy(tp, MII_ADVERTISE, adv);
5938 tg3_writephy(tp, MII_BMCR, bmcr |
5942 tg3_carrier_off(tp);
5944 tg3_writephy(tp, MII_BMCR, new_bmcr);
5946 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5947 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5948 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5949 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5950 bmsr |= BMSR_LSTATUS;
5952 bmsr &= ~BMSR_LSTATUS;
5954 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5958 if (bmsr & BMSR_LSTATUS) {
5959 current_speed = SPEED_1000;
5960 current_link_up = true;
5961 if (bmcr & BMCR_FULLDPLX)
5962 current_duplex = DUPLEX_FULL;
5964 current_duplex = DUPLEX_HALF;
5969 if (bmcr & BMCR_ANENABLE) {
5972 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5973 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5974 common = local_adv & remote_adv;
5975 if (common & (ADVERTISE_1000XHALF |
5976 ADVERTISE_1000XFULL)) {
5977 if (common & ADVERTISE_1000XFULL)
5978 current_duplex = DUPLEX_FULL;
5980 current_duplex = DUPLEX_HALF;
5982 tp->link_config.rmt_adv =
5983 mii_adv_to_ethtool_adv_x(remote_adv);
5984 } else if (!tg3_flag(tp, 5780_CLASS)) {
5985 /* Link is up via parallel detect */
5987 current_link_up = false;
5993 if (current_link_up && current_duplex == DUPLEX_FULL)
5994 tg3_setup_flow_control(tp, local_adv, remote_adv);
5996 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5997 if (tp->link_config.active_duplex == DUPLEX_HALF)
5998 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
6000 tw32_f(MAC_MODE, tp->mac_mode);
6003 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
6005 tp->link_config.active_speed = current_speed;
6006 tp->link_config.active_duplex = current_duplex;
6008 tg3_test_and_report_link_chg(tp, current_link_up);
6012 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6014 if (tp->serdes_counter) {
6015 /* Give autoneg time to complete. */
6016 tp->serdes_counter--;
6021 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6024 tg3_readphy(tp, MII_BMCR, &bmcr);
6025 if (bmcr & BMCR_ANENABLE) {
6028 /* Select shadow register 0x1f */
6029 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6030 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6032 /* Select expansion interrupt status register */
6033 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6034 MII_TG3_DSP_EXP1_INT_STAT);
6035 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6036 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6038 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6039 /* We have signal detect and not receiving
6040 * config code words, link is up by parallel
6044 bmcr &= ~BMCR_ANENABLE;
6045 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6046 tg3_writephy(tp, MII_BMCR, bmcr);
6047 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6050 } else if (tp->link_up &&
6051 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6052 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6055 /* Select expansion interrupt status register */
6056 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6057 MII_TG3_DSP_EXP1_INT_STAT);
6058 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6062 /* Config code words received, turn on autoneg. */
6063 tg3_readphy(tp, MII_BMCR, &bmcr);
6064 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6066 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6072 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6077 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6078 err = tg3_setup_fiber_phy(tp, force_reset);
6079 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6080 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6082 err = tg3_setup_copper_phy(tp, force_reset);
6084 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6087 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6088 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6090 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6095 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6096 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6097 tw32(GRC_MISC_CFG, val);
6100 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6101 (6 << TX_LENGTHS_IPG_SHIFT);
6102 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6103 tg3_asic_rev(tp) == ASIC_REV_5762)
6104 val |= tr32(MAC_TX_LENGTHS) &
6105 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6106 TX_LENGTHS_CNT_DWN_VAL_MSK);
6108 if (tp->link_config.active_speed == SPEED_1000 &&
6109 tp->link_config.active_duplex == DUPLEX_HALF)
6110 tw32(MAC_TX_LENGTHS, val |
6111 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6113 tw32(MAC_TX_LENGTHS, val |
6114 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6116 if (!tg3_flag(tp, 5705_PLUS)) {
6118 tw32(HOSTCC_STAT_COAL_TICKS,
6119 tp->coal.stats_block_coalesce_usecs);
6121 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6125 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6126 val = tr32(PCIE_PWR_MGMT_THRESH);
6128 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6131 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6132 tw32(PCIE_PWR_MGMT_THRESH, val);
6138 /* tp->lock must be held */
6139 static u64 tg3_refclk_read(struct tg3 *tp)
6141 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6142 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6145 /* tp->lock must be held */
6146 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6148 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6150 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6151 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6152 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6153 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6156 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6157 static inline void tg3_full_unlock(struct tg3 *tp);
6158 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6160 struct tg3 *tp = netdev_priv(dev);
6162 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6163 SOF_TIMESTAMPING_RX_SOFTWARE |
6164 SOF_TIMESTAMPING_SOFTWARE;
6166 if (tg3_flag(tp, PTP_CAPABLE)) {
6167 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6168 SOF_TIMESTAMPING_RX_HARDWARE |
6169 SOF_TIMESTAMPING_RAW_HARDWARE;
6173 info->phc_index = ptp_clock_index(tp->ptp_clock);
6175 info->phc_index = -1;
6177 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6179 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6180 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6181 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6182 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6186 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6188 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6189 bool neg_adj = false;
6197 /* Frequency adjustment is performed using hardware with a 24 bit
6198 * accumulator and a programmable correction value. On each clk, the
6199 * correction value gets added to the accumulator and when it
6200 * overflows, the time counter is incremented/decremented.
6202 * So conversion from ppb to correction value is
6203 * ppb * (1 << 24) / 1000000000
6205 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6206 TG3_EAV_REF_CLK_CORRECT_MASK;
6208 tg3_full_lock(tp, 0);
6211 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6212 TG3_EAV_REF_CLK_CORRECT_EN |
6213 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6215 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6217 tg3_full_unlock(tp);
6222 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6224 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6226 tg3_full_lock(tp, 0);
6227 tp->ptp_adjust += delta;
6228 tg3_full_unlock(tp);
6233 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6236 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6238 tg3_full_lock(tp, 0);
6239 ns = tg3_refclk_read(tp);
6240 ns += tp->ptp_adjust;
6241 tg3_full_unlock(tp);
6243 *ts = ns_to_timespec64(ns);
6248 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6249 const struct timespec64 *ts)
6252 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6254 ns = timespec64_to_ns(ts);
6256 tg3_full_lock(tp, 0);
6257 tg3_refclk_write(tp, ns);
6259 tg3_full_unlock(tp);
6264 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6265 struct ptp_clock_request *rq, int on)
6267 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6272 case PTP_CLK_REQ_PEROUT:
6273 if (rq->perout.index != 0)
6276 tg3_full_lock(tp, 0);
6277 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6278 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6283 nsec = rq->perout.start.sec * 1000000000ULL +
6284 rq->perout.start.nsec;
6286 if (rq->perout.period.sec || rq->perout.period.nsec) {
6287 netdev_warn(tp->dev,
6288 "Device supports only a one-shot timesync output, period must be 0\n");
6293 if (nsec & (1ULL << 63)) {
6294 netdev_warn(tp->dev,
6295 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6300 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6301 tw32(TG3_EAV_WATCHDOG0_MSB,
6302 TG3_EAV_WATCHDOG0_EN |
6303 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6305 tw32(TG3_EAV_REF_CLCK_CTL,
6306 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6308 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6309 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6313 tg3_full_unlock(tp);
6323 static const struct ptp_clock_info tg3_ptp_caps = {
6324 .owner = THIS_MODULE,
6325 .name = "tg3 clock",
6326 .max_adj = 250000000,
6332 .adjfreq = tg3_ptp_adjfreq,
6333 .adjtime = tg3_ptp_adjtime,
6334 .gettime64 = tg3_ptp_gettime,
6335 .settime64 = tg3_ptp_settime,
6336 .enable = tg3_ptp_enable,
6339 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6340 struct skb_shared_hwtstamps *timestamp)
6342 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6343 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6347 /* tp->lock must be held */
6348 static void tg3_ptp_init(struct tg3 *tp)
6350 if (!tg3_flag(tp, PTP_CAPABLE))
6353 /* Initialize the hardware clock to the system time. */
6354 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6356 tp->ptp_info = tg3_ptp_caps;
6359 /* tp->lock must be held */
6360 static void tg3_ptp_resume(struct tg3 *tp)
6362 if (!tg3_flag(tp, PTP_CAPABLE))
6365 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6369 static void tg3_ptp_fini(struct tg3 *tp)
6371 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6374 ptp_clock_unregister(tp->ptp_clock);
6375 tp->ptp_clock = NULL;
6379 static inline int tg3_irq_sync(struct tg3 *tp)
6381 return tp->irq_sync;
6384 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6388 dst = (u32 *)((u8 *)dst + off);
6389 for (i = 0; i < len; i += sizeof(u32))
6390 *dst++ = tr32(off + i);
6393 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6395 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6396 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6397 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6398 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6399 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6400 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6401 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6402 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6403 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6404 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6405 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6406 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6407 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6408 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6409 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6410 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6411 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6412 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6413 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6415 if (tg3_flag(tp, SUPPORT_MSIX))
6416 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6418 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6419 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6420 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6421 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6422 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6423 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6424 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6425 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6427 if (!tg3_flag(tp, 5705_PLUS)) {
6428 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6429 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6430 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6433 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6434 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6435 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6436 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6437 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6439 if (tg3_flag(tp, NVRAM))
6440 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6443 static void tg3_dump_state(struct tg3 *tp)
6448 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6452 if (tg3_flag(tp, PCI_EXPRESS)) {
6453 /* Read up to but not including private PCI registers */
6454 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6455 regs[i / sizeof(u32)] = tr32(i);
6457 tg3_dump_legacy_regs(tp, regs);
6459 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6460 if (!regs[i + 0] && !regs[i + 1] &&
6461 !regs[i + 2] && !regs[i + 3])
6464 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6466 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6471 for (i = 0; i < tp->irq_cnt; i++) {
6472 struct tg3_napi *tnapi = &tp->napi[i];
6474 /* SW status block */
6476 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6478 tnapi->hw_status->status,
6479 tnapi->hw_status->status_tag,
6480 tnapi->hw_status->rx_jumbo_consumer,
6481 tnapi->hw_status->rx_consumer,
6482 tnapi->hw_status->rx_mini_consumer,
6483 tnapi->hw_status->idx[0].rx_producer,
6484 tnapi->hw_status->idx[0].tx_consumer);
6487 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6489 tnapi->last_tag, tnapi->last_irq_tag,
6490 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6492 tnapi->prodring.rx_std_prod_idx,
6493 tnapi->prodring.rx_std_cons_idx,
6494 tnapi->prodring.rx_jmb_prod_idx,
6495 tnapi->prodring.rx_jmb_cons_idx);
6499 /* This is called whenever we suspect that the system chipset is re-
6500 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6501 * is bogus tx completions. We try to recover by setting the
6502 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6505 static void tg3_tx_recover(struct tg3 *tp)
6507 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6508 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6510 netdev_warn(tp->dev,
6511 "The system may be re-ordering memory-mapped I/O "
6512 "cycles to the network device, attempting to recover. "
6513 "Please report the problem to the driver maintainer "
6514 "and include system chipset information.\n");
6516 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6519 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6521 /* Tell compiler to fetch tx indices from memory. */
6523 return tnapi->tx_pending -
6524 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6527 /* Tigon3 never reports partial packet sends. So we do not
6528 * need special logic to handle SKBs that have not had all
6529 * of their frags sent yet, like SunGEM does.
6531 static void tg3_tx(struct tg3_napi *tnapi)
6533 struct tg3 *tp = tnapi->tp;
6534 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6535 u32 sw_idx = tnapi->tx_cons;
6536 struct netdev_queue *txq;
6537 int index = tnapi - tp->napi;
6538 unsigned int pkts_compl = 0, bytes_compl = 0;
6540 if (tg3_flag(tp, ENABLE_TSS))
6543 txq = netdev_get_tx_queue(tp->dev, index);
6545 while (sw_idx != hw_idx) {
6546 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6547 struct sk_buff *skb = ri->skb;
6550 if (unlikely(skb == NULL)) {
6555 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6556 struct skb_shared_hwtstamps timestamp;
6557 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6558 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6560 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp);
6562 skb_tstamp_tx(skb, ×tamp);
6565 pci_unmap_single(tp->pdev,
6566 dma_unmap_addr(ri, mapping),
6572 while (ri->fragmented) {
6573 ri->fragmented = false;
6574 sw_idx = NEXT_TX(sw_idx);
6575 ri = &tnapi->tx_buffers[sw_idx];
6578 sw_idx = NEXT_TX(sw_idx);
6580 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6581 ri = &tnapi->tx_buffers[sw_idx];
6582 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6585 pci_unmap_page(tp->pdev,
6586 dma_unmap_addr(ri, mapping),
6587 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6590 while (ri->fragmented) {
6591 ri->fragmented = false;
6592 sw_idx = NEXT_TX(sw_idx);
6593 ri = &tnapi->tx_buffers[sw_idx];
6596 sw_idx = NEXT_TX(sw_idx);
6600 bytes_compl += skb->len;
6602 dev_consume_skb_any(skb);
6604 if (unlikely(tx_bug)) {
6610 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6612 tnapi->tx_cons = sw_idx;
6614 /* Need to make the tx_cons update visible to tg3_start_xmit()
6615 * before checking for netif_queue_stopped(). Without the
6616 * memory barrier, there is a small possibility that tg3_start_xmit()
6617 * will miss it and cause the queue to be stopped forever.
6621 if (unlikely(netif_tx_queue_stopped(txq) &&
6622 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6623 __netif_tx_lock(txq, smp_processor_id());
6624 if (netif_tx_queue_stopped(txq) &&
6625 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6626 netif_tx_wake_queue(txq);
6627 __netif_tx_unlock(txq);
6631 static void tg3_frag_free(bool is_frag, void *data)
6634 skb_free_frag(data);
6639 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6641 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6642 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6647 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6648 map_sz, PCI_DMA_FROMDEVICE);
6649 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6654 /* Returns size of skb allocated or < 0 on error.
6656 * We only need to fill in the address because the other members
6657 * of the RX descriptor are invariant, see tg3_init_rings.
6659 * Note the purposeful assymetry of cpu vs. chip accesses. For
6660 * posting buffers we only dirty the first cache line of the RX
6661 * descriptor (containing the address). Whereas for the RX status
6662 * buffers the cpu only reads the last cacheline of the RX descriptor
6663 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6665 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6666 u32 opaque_key, u32 dest_idx_unmasked,
6667 unsigned int *frag_size)
6669 struct tg3_rx_buffer_desc *desc;
6670 struct ring_info *map;
6673 int skb_size, data_size, dest_idx;
6675 switch (opaque_key) {
6676 case RXD_OPAQUE_RING_STD:
6677 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6678 desc = &tpr->rx_std[dest_idx];
6679 map = &tpr->rx_std_buffers[dest_idx];
6680 data_size = tp->rx_pkt_map_sz;
6683 case RXD_OPAQUE_RING_JUMBO:
6684 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6685 desc = &tpr->rx_jmb[dest_idx].std;
6686 map = &tpr->rx_jmb_buffers[dest_idx];
6687 data_size = TG3_RX_JMB_MAP_SZ;
6694 /* Do not overwrite any of the map or rp information
6695 * until we are sure we can commit to a new buffer.
6697 * Callers depend upon this behavior and assume that
6698 * we leave everything unchanged if we fail.
6700 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6701 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6702 if (skb_size <= PAGE_SIZE) {
6703 data = netdev_alloc_frag(skb_size);
6704 *frag_size = skb_size;
6706 data = kmalloc(skb_size, GFP_ATOMIC);
6712 mapping = pci_map_single(tp->pdev,
6713 data + TG3_RX_OFFSET(tp),
6715 PCI_DMA_FROMDEVICE);
6716 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6717 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6722 dma_unmap_addr_set(map, mapping, mapping);
6724 desc->addr_hi = ((u64)mapping >> 32);
6725 desc->addr_lo = ((u64)mapping & 0xffffffff);
6730 /* We only need to move over in the address because the other
6731 * members of the RX descriptor are invariant. See notes above
6732 * tg3_alloc_rx_data for full details.
6734 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6735 struct tg3_rx_prodring_set *dpr,
6736 u32 opaque_key, int src_idx,
6737 u32 dest_idx_unmasked)
6739 struct tg3 *tp = tnapi->tp;
6740 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6741 struct ring_info *src_map, *dest_map;
6742 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6745 switch (opaque_key) {
6746 case RXD_OPAQUE_RING_STD:
6747 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6748 dest_desc = &dpr->rx_std[dest_idx];
6749 dest_map = &dpr->rx_std_buffers[dest_idx];
6750 src_desc = &spr->rx_std[src_idx];
6751 src_map = &spr->rx_std_buffers[src_idx];
6754 case RXD_OPAQUE_RING_JUMBO:
6755 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6756 dest_desc = &dpr->rx_jmb[dest_idx].std;
6757 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6758 src_desc = &spr->rx_jmb[src_idx].std;
6759 src_map = &spr->rx_jmb_buffers[src_idx];
6766 dest_map->data = src_map->data;
6767 dma_unmap_addr_set(dest_map, mapping,
6768 dma_unmap_addr(src_map, mapping));
6769 dest_desc->addr_hi = src_desc->addr_hi;
6770 dest_desc->addr_lo = src_desc->addr_lo;
6772 /* Ensure that the update to the skb happens after the physical
6773 * addresses have been transferred to the new BD location.
6777 src_map->data = NULL;
6780 /* The RX ring scheme is composed of multiple rings which post fresh
6781 * buffers to the chip, and one special ring the chip uses to report
6782 * status back to the host.
6784 * The special ring reports the status of received packets to the
6785 * host. The chip does not write into the original descriptor the
6786 * RX buffer was obtained from. The chip simply takes the original
6787 * descriptor as provided by the host, updates the status and length
6788 * field, then writes this into the next status ring entry.
6790 * Each ring the host uses to post buffers to the chip is described
6791 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6792 * it is first placed into the on-chip ram. When the packet's length
6793 * is known, it walks down the TG3_BDINFO entries to select the ring.
6794 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6795 * which is within the range of the new packet's length is chosen.
6797 * The "separate ring for rx status" scheme may sound queer, but it makes
6798 * sense from a cache coherency perspective. If only the host writes
6799 * to the buffer post rings, and only the chip writes to the rx status
6800 * rings, then cache lines never move beyond shared-modified state.
6801 * If both the host and chip were to write into the same ring, cache line
6802 * eviction could occur since both entities want it in an exclusive state.
6804 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6806 struct tg3 *tp = tnapi->tp;
6807 u32 work_mask, rx_std_posted = 0;
6808 u32 std_prod_idx, jmb_prod_idx;
6809 u32 sw_idx = tnapi->rx_rcb_ptr;
6812 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6814 hw_idx = *(tnapi->rx_rcb_prod_idx);
6816 * We need to order the read of hw_idx and the read of
6817 * the opaque cookie.
6822 std_prod_idx = tpr->rx_std_prod_idx;
6823 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6824 while (sw_idx != hw_idx && budget > 0) {
6825 struct ring_info *ri;
6826 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6828 struct sk_buff *skb;
6829 dma_addr_t dma_addr;
6830 u32 opaque_key, desc_idx, *post_ptr;
6834 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6835 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6836 if (opaque_key == RXD_OPAQUE_RING_STD) {
6837 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6838 dma_addr = dma_unmap_addr(ri, mapping);
6840 post_ptr = &std_prod_idx;
6842 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6843 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6844 dma_addr = dma_unmap_addr(ri, mapping);
6846 post_ptr = &jmb_prod_idx;
6848 goto next_pkt_nopost;
6850 work_mask |= opaque_key;
6852 if (desc->err_vlan & RXD_ERR_MASK) {
6854 tg3_recycle_rx(tnapi, tpr, opaque_key,
6855 desc_idx, *post_ptr);
6857 /* Other statistics kept track of by card. */
6862 prefetch(data + TG3_RX_OFFSET(tp));
6863 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6866 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6867 RXD_FLAG_PTPSTAT_PTPV1 ||
6868 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6869 RXD_FLAG_PTPSTAT_PTPV2) {
6870 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6871 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6874 if (len > TG3_RX_COPY_THRESH(tp)) {
6876 unsigned int frag_size;
6878 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6879 *post_ptr, &frag_size);
6883 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6884 PCI_DMA_FROMDEVICE);
6886 /* Ensure that the update to the data happens
6887 * after the usage of the old DMA mapping.
6893 skb = build_skb(data, frag_size);
6895 tg3_frag_free(frag_size != 0, data);
6896 goto drop_it_no_recycle;
6898 skb_reserve(skb, TG3_RX_OFFSET(tp));
6900 tg3_recycle_rx(tnapi, tpr, opaque_key,
6901 desc_idx, *post_ptr);
6903 skb = netdev_alloc_skb(tp->dev,
6904 len + TG3_RAW_IP_ALIGN);
6906 goto drop_it_no_recycle;
6908 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6909 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6911 data + TG3_RX_OFFSET(tp),
6913 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6918 tg3_hwclock_to_timestamp(tp, tstamp,
6919 skb_hwtstamps(skb));
6921 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6922 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6923 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6924 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6925 skb->ip_summed = CHECKSUM_UNNECESSARY;
6927 skb_checksum_none_assert(skb);
6929 skb->protocol = eth_type_trans(skb, tp->dev);
6931 if (len > (tp->dev->mtu + ETH_HLEN) &&
6932 skb->protocol != htons(ETH_P_8021Q) &&
6933 skb->protocol != htons(ETH_P_8021AD)) {
6934 dev_kfree_skb_any(skb);
6935 goto drop_it_no_recycle;
6938 if (desc->type_flags & RXD_FLAG_VLAN &&
6939 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6940 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6941 desc->err_vlan & RXD_VLAN_MASK);
6943 napi_gro_receive(&tnapi->napi, skb);
6951 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6952 tpr->rx_std_prod_idx = std_prod_idx &
6953 tp->rx_std_ring_mask;
6954 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6955 tpr->rx_std_prod_idx);
6956 work_mask &= ~RXD_OPAQUE_RING_STD;
6961 sw_idx &= tp->rx_ret_ring_mask;
6963 /* Refresh hw_idx to see if there is new work */
6964 if (sw_idx == hw_idx) {
6965 hw_idx = *(tnapi->rx_rcb_prod_idx);
6970 /* ACK the status ring. */
6971 tnapi->rx_rcb_ptr = sw_idx;
6972 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6974 /* Refill RX ring(s). */
6975 if (!tg3_flag(tp, ENABLE_RSS)) {
6976 /* Sync BD data before updating mailbox */
6979 if (work_mask & RXD_OPAQUE_RING_STD) {
6980 tpr->rx_std_prod_idx = std_prod_idx &
6981 tp->rx_std_ring_mask;
6982 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6983 tpr->rx_std_prod_idx);
6985 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6986 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6987 tp->rx_jmb_ring_mask;
6988 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6989 tpr->rx_jmb_prod_idx);
6992 } else if (work_mask) {
6993 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6994 * updated before the producer indices can be updated.
6998 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6999 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7001 if (tnapi != &tp->napi[1]) {
7002 tp->rx_refill = true;
7003 napi_schedule(&tp->napi[1].napi);
7010 static void tg3_poll_link(struct tg3 *tp)
7012 /* handle link change and other phy events */
7013 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7014 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7016 if (sblk->status & SD_STATUS_LINK_CHG) {
7017 sblk->status = SD_STATUS_UPDATED |
7018 (sblk->status & ~SD_STATUS_LINK_CHG);
7019 spin_lock(&tp->lock);
7020 if (tg3_flag(tp, USE_PHYLIB)) {
7022 (MAC_STATUS_SYNC_CHANGED |
7023 MAC_STATUS_CFG_CHANGED |
7024 MAC_STATUS_MI_COMPLETION |
7025 MAC_STATUS_LNKSTATE_CHANGED));
7028 tg3_setup_phy(tp, false);
7029 spin_unlock(&tp->lock);
7034 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7035 struct tg3_rx_prodring_set *dpr,
7036 struct tg3_rx_prodring_set *spr)
7038 u32 si, di, cpycnt, src_prod_idx;
7042 src_prod_idx = spr->rx_std_prod_idx;
7044 /* Make sure updates to the rx_std_buffers[] entries and the
7045 * standard producer index are seen in the correct order.
7049 if (spr->rx_std_cons_idx == src_prod_idx)
7052 if (spr->rx_std_cons_idx < src_prod_idx)
7053 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7055 cpycnt = tp->rx_std_ring_mask + 1 -
7056 spr->rx_std_cons_idx;
7058 cpycnt = min(cpycnt,
7059 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7061 si = spr->rx_std_cons_idx;
7062 di = dpr->rx_std_prod_idx;
7064 for (i = di; i < di + cpycnt; i++) {
7065 if (dpr->rx_std_buffers[i].data) {
7075 /* Ensure that updates to the rx_std_buffers ring and the
7076 * shadowed hardware producer ring from tg3_recycle_skb() are
7077 * ordered correctly WRT the skb check above.
7081 memcpy(&dpr->rx_std_buffers[di],
7082 &spr->rx_std_buffers[si],
7083 cpycnt * sizeof(struct ring_info));
7085 for (i = 0; i < cpycnt; i++, di++, si++) {
7086 struct tg3_rx_buffer_desc *sbd, *dbd;
7087 sbd = &spr->rx_std[si];
7088 dbd = &dpr->rx_std[di];
7089 dbd->addr_hi = sbd->addr_hi;
7090 dbd->addr_lo = sbd->addr_lo;
7093 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7094 tp->rx_std_ring_mask;
7095 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7096 tp->rx_std_ring_mask;
7100 src_prod_idx = spr->rx_jmb_prod_idx;
7102 /* Make sure updates to the rx_jmb_buffers[] entries and
7103 * the jumbo producer index are seen in the correct order.
7107 if (spr->rx_jmb_cons_idx == src_prod_idx)
7110 if (spr->rx_jmb_cons_idx < src_prod_idx)
7111 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7113 cpycnt = tp->rx_jmb_ring_mask + 1 -
7114 spr->rx_jmb_cons_idx;
7116 cpycnt = min(cpycnt,
7117 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7119 si = spr->rx_jmb_cons_idx;
7120 di = dpr->rx_jmb_prod_idx;
7122 for (i = di; i < di + cpycnt; i++) {
7123 if (dpr->rx_jmb_buffers[i].data) {
7133 /* Ensure that updates to the rx_jmb_buffers ring and the
7134 * shadowed hardware producer ring from tg3_recycle_skb() are
7135 * ordered correctly WRT the skb check above.
7139 memcpy(&dpr->rx_jmb_buffers[di],
7140 &spr->rx_jmb_buffers[si],
7141 cpycnt * sizeof(struct ring_info));
7143 for (i = 0; i < cpycnt; i++, di++, si++) {
7144 struct tg3_rx_buffer_desc *sbd, *dbd;
7145 sbd = &spr->rx_jmb[si].std;
7146 dbd = &dpr->rx_jmb[di].std;
7147 dbd->addr_hi = sbd->addr_hi;
7148 dbd->addr_lo = sbd->addr_lo;
7151 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7152 tp->rx_jmb_ring_mask;
7153 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7154 tp->rx_jmb_ring_mask;
7160 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7162 struct tg3 *tp = tnapi->tp;
7164 /* run TX completion thread */
7165 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7167 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7171 if (!tnapi->rx_rcb_prod_idx)
7174 /* run RX thread, within the bounds set by NAPI.
7175 * All RX "locking" is done by ensuring outside
7176 * code synchronizes with tg3->napi.poll()
7178 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7179 work_done += tg3_rx(tnapi, budget - work_done);
7181 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7182 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7184 u32 std_prod_idx = dpr->rx_std_prod_idx;
7185 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7187 tp->rx_refill = false;
7188 for (i = 1; i <= tp->rxq_cnt; i++)
7189 err |= tg3_rx_prodring_xfer(tp, dpr,
7190 &tp->napi[i].prodring);
7194 if (std_prod_idx != dpr->rx_std_prod_idx)
7195 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7196 dpr->rx_std_prod_idx);
7198 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7199 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7200 dpr->rx_jmb_prod_idx);
7205 tw32_f(HOSTCC_MODE, tp->coal_now);
7211 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7213 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7214 schedule_work(&tp->reset_task);
7217 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7219 cancel_work_sync(&tp->reset_task);
7220 tg3_flag_clear(tp, RESET_TASK_PENDING);
7221 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7224 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7226 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7227 struct tg3 *tp = tnapi->tp;
7229 struct tg3_hw_status *sblk = tnapi->hw_status;
7232 work_done = tg3_poll_work(tnapi, work_done, budget);
7234 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7237 if (unlikely(work_done >= budget))
7240 /* tp->last_tag is used in tg3_int_reenable() below
7241 * to tell the hw how much work has been processed,
7242 * so we must read it before checking for more work.
7244 tnapi->last_tag = sblk->status_tag;
7245 tnapi->last_irq_tag = tnapi->last_tag;
7248 /* check for RX/TX work to do */
7249 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7250 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7252 /* This test here is not race free, but will reduce
7253 * the number of interrupts by looping again.
7255 if (tnapi == &tp->napi[1] && tp->rx_refill)
7258 napi_complete_done(napi, work_done);
7259 /* Reenable interrupts. */
7260 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7262 /* This test here is synchronized by napi_schedule()
7263 * and napi_complete() to close the race condition.
7265 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7266 tw32(HOSTCC_MODE, tp->coalesce_mode |
7267 HOSTCC_MODE_ENABLE |
7275 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7279 /* work_done is guaranteed to be less than budget. */
7280 napi_complete(napi);
7281 tg3_reset_task_schedule(tp);
7285 static void tg3_process_error(struct tg3 *tp)
7288 bool real_error = false;
7290 if (tg3_flag(tp, ERROR_PROCESSED))
7293 /* Check Flow Attention register */
7294 val = tr32(HOSTCC_FLOW_ATTN);
7295 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7296 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7300 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7301 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7305 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7306 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7315 tg3_flag_set(tp, ERROR_PROCESSED);
7316 tg3_reset_task_schedule(tp);
7319 static int tg3_poll(struct napi_struct *napi, int budget)
7321 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7322 struct tg3 *tp = tnapi->tp;
7324 struct tg3_hw_status *sblk = tnapi->hw_status;
7327 if (sblk->status & SD_STATUS_ERROR)
7328 tg3_process_error(tp);
7332 work_done = tg3_poll_work(tnapi, work_done, budget);
7334 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7337 if (unlikely(work_done >= budget))
7340 if (tg3_flag(tp, TAGGED_STATUS)) {
7341 /* tp->last_tag is used in tg3_int_reenable() below
7342 * to tell the hw how much work has been processed,
7343 * so we must read it before checking for more work.
7345 tnapi->last_tag = sblk->status_tag;
7346 tnapi->last_irq_tag = tnapi->last_tag;
7349 sblk->status &= ~SD_STATUS_UPDATED;
7351 if (likely(!tg3_has_work(tnapi))) {
7352 napi_complete_done(napi, work_done);
7353 tg3_int_reenable(tnapi);
7358 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7362 /* work_done is guaranteed to be less than budget. */
7363 napi_complete(napi);
7364 tg3_reset_task_schedule(tp);
7368 static void tg3_napi_disable(struct tg3 *tp)
7372 for (i = tp->irq_cnt - 1; i >= 0; i--)
7373 napi_disable(&tp->napi[i].napi);
7376 static void tg3_napi_enable(struct tg3 *tp)
7380 for (i = 0; i < tp->irq_cnt; i++)
7381 napi_enable(&tp->napi[i].napi);
7384 static void tg3_napi_init(struct tg3 *tp)
7388 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7389 for (i = 1; i < tp->irq_cnt; i++)
7390 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7393 static void tg3_napi_fini(struct tg3 *tp)
7397 for (i = 0; i < tp->irq_cnt; i++)
7398 netif_napi_del(&tp->napi[i].napi);
7401 static inline void tg3_netif_stop(struct tg3 *tp)
7403 netif_trans_update(tp->dev); /* prevent tx timeout */
7404 tg3_napi_disable(tp);
7405 netif_carrier_off(tp->dev);
7406 netif_tx_disable(tp->dev);
7409 /* tp->lock must be held */
7410 static inline void tg3_netif_start(struct tg3 *tp)
7414 /* NOTE: unconditional netif_tx_wake_all_queues is only
7415 * appropriate so long as all callers are assured to
7416 * have free tx slots (such as after tg3_init_hw)
7418 netif_tx_wake_all_queues(tp->dev);
7421 netif_carrier_on(tp->dev);
7423 tg3_napi_enable(tp);
7424 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7425 tg3_enable_ints(tp);
7428 static void tg3_irq_quiesce(struct tg3 *tp)
7429 __releases(tp->lock)
7430 __acquires(tp->lock)
7434 BUG_ON(tp->irq_sync);
7439 spin_unlock_bh(&tp->lock);
7441 for (i = 0; i < tp->irq_cnt; i++)
7442 synchronize_irq(tp->napi[i].irq_vec);
7444 spin_lock_bh(&tp->lock);
7447 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7448 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7449 * with as well. Most of the time, this is not necessary except when
7450 * shutting down the device.
7452 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7454 spin_lock_bh(&tp->lock);
7456 tg3_irq_quiesce(tp);
7459 static inline void tg3_full_unlock(struct tg3 *tp)
7461 spin_unlock_bh(&tp->lock);
7464 /* One-shot MSI handler - Chip automatically disables interrupt
7465 * after sending MSI so driver doesn't have to do it.
7467 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7469 struct tg3_napi *tnapi = dev_id;
7470 struct tg3 *tp = tnapi->tp;
7472 prefetch(tnapi->hw_status);
7474 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7476 if (likely(!tg3_irq_sync(tp)))
7477 napi_schedule(&tnapi->napi);
7482 /* MSI ISR - No need to check for interrupt sharing and no need to
7483 * flush status block and interrupt mailbox. PCI ordering rules
7484 * guarantee that MSI will arrive after the status block.
7486 static irqreturn_t tg3_msi(int irq, void *dev_id)
7488 struct tg3_napi *tnapi = dev_id;
7489 struct tg3 *tp = tnapi->tp;
7491 prefetch(tnapi->hw_status);
7493 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7495 * Writing any value to intr-mbox-0 clears PCI INTA# and
7496 * chip-internal interrupt pending events.
7497 * Writing non-zero to intr-mbox-0 additional tells the
7498 * NIC to stop sending us irqs, engaging "in-intr-handler"
7501 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7502 if (likely(!tg3_irq_sync(tp)))
7503 napi_schedule(&tnapi->napi);
7505 return IRQ_RETVAL(1);
7508 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7510 struct tg3_napi *tnapi = dev_id;
7511 struct tg3 *tp = tnapi->tp;
7512 struct tg3_hw_status *sblk = tnapi->hw_status;
7513 unsigned int handled = 1;
7515 /* In INTx mode, it is possible for the interrupt to arrive at
7516 * the CPU before the status block posted prior to the interrupt.
7517 * Reading the PCI State register will confirm whether the
7518 * interrupt is ours and will flush the status block.
7520 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7521 if (tg3_flag(tp, CHIP_RESETTING) ||
7522 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7529 * Writing any value to intr-mbox-0 clears PCI INTA# and
7530 * chip-internal interrupt pending events.
7531 * Writing non-zero to intr-mbox-0 additional tells the
7532 * NIC to stop sending us irqs, engaging "in-intr-handler"
7535 * Flush the mailbox to de-assert the IRQ immediately to prevent
7536 * spurious interrupts. The flush impacts performance but
7537 * excessive spurious interrupts can be worse in some cases.
7539 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7540 if (tg3_irq_sync(tp))
7542 sblk->status &= ~SD_STATUS_UPDATED;
7543 if (likely(tg3_has_work(tnapi))) {
7544 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7545 napi_schedule(&tnapi->napi);
7547 /* No work, shared interrupt perhaps? re-enable
7548 * interrupts, and flush that PCI write
7550 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7554 return IRQ_RETVAL(handled);
7557 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7559 struct tg3_napi *tnapi = dev_id;
7560 struct tg3 *tp = tnapi->tp;
7561 struct tg3_hw_status *sblk = tnapi->hw_status;
7562 unsigned int handled = 1;
7564 /* In INTx mode, it is possible for the interrupt to arrive at
7565 * the CPU before the status block posted prior to the interrupt.
7566 * Reading the PCI State register will confirm whether the
7567 * interrupt is ours and will flush the status block.
7569 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7570 if (tg3_flag(tp, CHIP_RESETTING) ||
7571 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7578 * writing any value to intr-mbox-0 clears PCI INTA# and
7579 * chip-internal interrupt pending events.
7580 * writing non-zero to intr-mbox-0 additional tells the
7581 * NIC to stop sending us irqs, engaging "in-intr-handler"
7584 * Flush the mailbox to de-assert the IRQ immediately to prevent
7585 * spurious interrupts. The flush impacts performance but
7586 * excessive spurious interrupts can be worse in some cases.
7588 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7591 * In a shared interrupt configuration, sometimes other devices'
7592 * interrupts will scream. We record the current status tag here
7593 * so that the above check can report that the screaming interrupts
7594 * are unhandled. Eventually they will be silenced.
7596 tnapi->last_irq_tag = sblk->status_tag;
7598 if (tg3_irq_sync(tp))
7601 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7603 napi_schedule(&tnapi->napi);
7606 return IRQ_RETVAL(handled);
7609 /* ISR for interrupt test */
7610 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7612 struct tg3_napi *tnapi = dev_id;
7613 struct tg3 *tp = tnapi->tp;
7614 struct tg3_hw_status *sblk = tnapi->hw_status;
7616 if ((sblk->status & SD_STATUS_UPDATED) ||
7617 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7618 tg3_disable_ints(tp);
7619 return IRQ_RETVAL(1);
7621 return IRQ_RETVAL(0);
7624 #ifdef CONFIG_NET_POLL_CONTROLLER
7625 static void tg3_poll_controller(struct net_device *dev)
7628 struct tg3 *tp = netdev_priv(dev);
7630 if (tg3_irq_sync(tp))
7633 for (i = 0; i < tp->irq_cnt; i++)
7634 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7638 static void tg3_tx_timeout(struct net_device *dev)
7640 struct tg3 *tp = netdev_priv(dev);
7642 if (netif_msg_tx_err(tp)) {
7643 netdev_err(dev, "transmit timed out, resetting\n");
7647 tg3_reset_task_schedule(tp);
7650 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7651 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7653 u32 base = (u32) mapping & 0xffffffff;
7655 return base + len + 8 < base;
7658 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7659 * of any 4GB boundaries: 4G, 8G, etc
7661 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7664 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7665 u32 base = (u32) mapping & 0xffffffff;
7667 return ((base + len + (mss & 0x3fff)) < base);
7672 /* Test for DMA addresses > 40-bit */
7673 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7676 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7677 if (tg3_flag(tp, 40BIT_DMA_BUG))
7678 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7685 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7686 dma_addr_t mapping, u32 len, u32 flags,
7689 txbd->addr_hi = ((u64) mapping >> 32);
7690 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7691 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7692 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7695 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7696 dma_addr_t map, u32 len, u32 flags,
7699 struct tg3 *tp = tnapi->tp;
7702 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7705 if (tg3_4g_overflow_test(map, len))
7708 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7711 if (tg3_40bit_overflow_test(tp, map, len))
7714 if (tp->dma_limit) {
7715 u32 prvidx = *entry;
7716 u32 tmp_flag = flags & ~TXD_FLAG_END;
7717 while (len > tp->dma_limit && *budget) {
7718 u32 frag_len = tp->dma_limit;
7719 len -= tp->dma_limit;
7721 /* Avoid the 8byte DMA problem */
7723 len += tp->dma_limit / 2;
7724 frag_len = tp->dma_limit / 2;
7727 tnapi->tx_buffers[*entry].fragmented = true;
7729 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7730 frag_len, tmp_flag, mss, vlan);
7733 *entry = NEXT_TX(*entry);
7740 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7741 len, flags, mss, vlan);
7743 *entry = NEXT_TX(*entry);
7746 tnapi->tx_buffers[prvidx].fragmented = false;
7750 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7751 len, flags, mss, vlan);
7752 *entry = NEXT_TX(*entry);
7758 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7761 struct sk_buff *skb;
7762 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7767 pci_unmap_single(tnapi->tp->pdev,
7768 dma_unmap_addr(txb, mapping),
7772 while (txb->fragmented) {
7773 txb->fragmented = false;
7774 entry = NEXT_TX(entry);
7775 txb = &tnapi->tx_buffers[entry];
7778 for (i = 0; i <= last; i++) {
7779 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7781 entry = NEXT_TX(entry);
7782 txb = &tnapi->tx_buffers[entry];
7784 pci_unmap_page(tnapi->tp->pdev,
7785 dma_unmap_addr(txb, mapping),
7786 skb_frag_size(frag), PCI_DMA_TODEVICE);
7788 while (txb->fragmented) {
7789 txb->fragmented = false;
7790 entry = NEXT_TX(entry);
7791 txb = &tnapi->tx_buffers[entry];
7796 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7797 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7798 struct sk_buff **pskb,
7799 u32 *entry, u32 *budget,
7800 u32 base_flags, u32 mss, u32 vlan)
7802 struct tg3 *tp = tnapi->tp;
7803 struct sk_buff *new_skb, *skb = *pskb;
7804 dma_addr_t new_addr = 0;
7807 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7808 new_skb = skb_copy(skb, GFP_ATOMIC);
7810 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7812 new_skb = skb_copy_expand(skb,
7813 skb_headroom(skb) + more_headroom,
7814 skb_tailroom(skb), GFP_ATOMIC);
7820 /* New SKB is guaranteed to be linear. */
7821 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7823 /* Make sure the mapping succeeded */
7824 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7825 dev_kfree_skb_any(new_skb);
7828 u32 save_entry = *entry;
7830 base_flags |= TXD_FLAG_END;
7832 tnapi->tx_buffers[*entry].skb = new_skb;
7833 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7836 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7837 new_skb->len, base_flags,
7839 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7840 dev_kfree_skb_any(new_skb);
7846 dev_consume_skb_any(skb);
7851 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7853 /* Check if we will never have enough descriptors,
7854 * as gso_segs can be more than current ring size
7856 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7859 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7861 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7862 * indicated in tg3_tx_frag_set()
7864 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7865 struct netdev_queue *txq, struct sk_buff *skb)
7867 struct sk_buff *segs, *nskb;
7868 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7870 /* Estimate the number of fragments in the worst case */
7871 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7872 netif_tx_stop_queue(txq);
7874 /* netif_tx_stop_queue() must be done before checking
7875 * checking tx index in tg3_tx_avail() below, because in
7876 * tg3_tx(), we update tx index before checking for
7877 * netif_tx_queue_stopped().
7880 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7881 return NETDEV_TX_BUSY;
7883 netif_tx_wake_queue(txq);
7886 segs = skb_gso_segment(skb, tp->dev->features &
7887 ~(NETIF_F_TSO | NETIF_F_TSO6));
7888 if (IS_ERR(segs) || !segs)
7889 goto tg3_tso_bug_end;
7895 tg3_start_xmit(nskb, tp->dev);
7899 dev_consume_skb_any(skb);
7901 return NETDEV_TX_OK;
7904 /* hard_start_xmit for all devices */
7905 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7907 struct tg3 *tp = netdev_priv(dev);
7908 u32 len, entry, base_flags, mss, vlan = 0;
7910 int i = -1, would_hit_hwbug;
7912 struct tg3_napi *tnapi;
7913 struct netdev_queue *txq;
7915 struct iphdr *iph = NULL;
7916 struct tcphdr *tcph = NULL;
7917 __sum16 tcp_csum = 0, ip_csum = 0;
7918 __be16 ip_tot_len = 0;
7920 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7921 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7922 if (tg3_flag(tp, ENABLE_TSS))
7925 budget = tg3_tx_avail(tnapi);
7927 /* We are running in BH disabled context with netif_tx_lock
7928 * and TX reclaim runs via tp->napi.poll inside of a software
7929 * interrupt. Furthermore, IRQ processing runs lockless so we have
7930 * no IRQ context deadlocks to worry about either. Rejoice!
7932 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7933 if (!netif_tx_queue_stopped(txq)) {
7934 netif_tx_stop_queue(txq);
7936 /* This is a hard error, log it. */
7938 "BUG! Tx Ring full when queue awake!\n");
7940 return NETDEV_TX_BUSY;
7943 entry = tnapi->tx_prod;
7946 mss = skb_shinfo(skb)->gso_size;
7948 u32 tcp_opt_len, hdr_len;
7950 if (skb_cow_head(skb, 0))
7954 tcp_opt_len = tcp_optlen(skb);
7956 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7958 /* HW/FW can not correctly segment packets that have been
7959 * vlan encapsulated.
7961 if (skb->protocol == htons(ETH_P_8021Q) ||
7962 skb->protocol == htons(ETH_P_8021AD)) {
7963 if (tg3_tso_bug_gso_check(tnapi, skb))
7964 return tg3_tso_bug(tp, tnapi, txq, skb);
7968 if (!skb_is_gso_v6(skb)) {
7969 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7970 tg3_flag(tp, TSO_BUG)) {
7971 if (tg3_tso_bug_gso_check(tnapi, skb))
7972 return tg3_tso_bug(tp, tnapi, txq, skb);
7975 ip_csum = iph->check;
7976 ip_tot_len = iph->tot_len;
7978 iph->tot_len = htons(mss + hdr_len);
7981 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7982 TXD_FLAG_CPU_POST_DMA);
7984 tcph = tcp_hdr(skb);
7985 tcp_csum = tcph->check;
7987 if (tg3_flag(tp, HW_TSO_1) ||
7988 tg3_flag(tp, HW_TSO_2) ||
7989 tg3_flag(tp, HW_TSO_3)) {
7991 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7993 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7997 if (tg3_flag(tp, HW_TSO_3)) {
7998 mss |= (hdr_len & 0xc) << 12;
8000 base_flags |= 0x00000010;
8001 base_flags |= (hdr_len & 0x3e0) << 5;
8002 } else if (tg3_flag(tp, HW_TSO_2))
8003 mss |= hdr_len << 9;
8004 else if (tg3_flag(tp, HW_TSO_1) ||
8005 tg3_asic_rev(tp) == ASIC_REV_5705) {
8006 if (tcp_opt_len || iph->ihl > 5) {
8009 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8010 mss |= (tsflags << 11);
8013 if (tcp_opt_len || iph->ihl > 5) {
8016 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8017 base_flags |= tsflags << 12;
8020 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8021 /* HW/FW can not correctly checksum packets that have been
8022 * vlan encapsulated.
8024 if (skb->protocol == htons(ETH_P_8021Q) ||
8025 skb->protocol == htons(ETH_P_8021AD)) {
8026 if (skb_checksum_help(skb))
8029 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8033 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8034 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8035 base_flags |= TXD_FLAG_JMB_PKT;
8037 if (skb_vlan_tag_present(skb)) {
8038 base_flags |= TXD_FLAG_VLAN;
8039 vlan = skb_vlan_tag_get(skb);
8042 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8043 tg3_flag(tp, TX_TSTAMP_EN)) {
8044 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8045 base_flags |= TXD_FLAG_HWTSTAMP;
8048 len = skb_headlen(skb);
8050 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8051 if (pci_dma_mapping_error(tp->pdev, mapping))
8055 tnapi->tx_buffers[entry].skb = skb;
8056 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8058 would_hit_hwbug = 0;
8060 if (tg3_flag(tp, 5701_DMA_BUG))
8061 would_hit_hwbug = 1;
8063 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8064 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8066 would_hit_hwbug = 1;
8067 } else if (skb_shinfo(skb)->nr_frags > 0) {
8070 if (!tg3_flag(tp, HW_TSO_1) &&
8071 !tg3_flag(tp, HW_TSO_2) &&
8072 !tg3_flag(tp, HW_TSO_3))
8075 /* Now loop through additional data
8076 * fragments, and queue them.
8078 last = skb_shinfo(skb)->nr_frags - 1;
8079 for (i = 0; i <= last; i++) {
8080 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8082 len = skb_frag_size(frag);
8083 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8084 len, DMA_TO_DEVICE);
8086 tnapi->tx_buffers[entry].skb = NULL;
8087 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8089 if (dma_mapping_error(&tp->pdev->dev, mapping))
8093 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8095 ((i == last) ? TXD_FLAG_END : 0),
8097 would_hit_hwbug = 1;
8103 if (would_hit_hwbug) {
8104 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8106 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8107 /* If it's a TSO packet, do GSO instead of
8108 * allocating and copying to a large linear SKB
8111 iph->check = ip_csum;
8112 iph->tot_len = ip_tot_len;
8114 tcph->check = tcp_csum;
8115 return tg3_tso_bug(tp, tnapi, txq, skb);
8118 /* If the workaround fails due to memory/mapping
8119 * failure, silently drop this packet.
8121 entry = tnapi->tx_prod;
8122 budget = tg3_tx_avail(tnapi);
8123 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8124 base_flags, mss, vlan))
8128 skb_tx_timestamp(skb);
8129 netdev_tx_sent_queue(txq, skb->len);
8131 /* Sync BD data before updating mailbox */
8134 tnapi->tx_prod = entry;
8135 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8136 netif_tx_stop_queue(txq);
8138 /* netif_tx_stop_queue() must be done before checking
8139 * checking tx index in tg3_tx_avail() below, because in
8140 * tg3_tx(), we update tx index before checking for
8141 * netif_tx_queue_stopped().
8144 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8145 netif_tx_wake_queue(txq);
8148 if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8149 /* Packets are ready, update Tx producer idx on card. */
8150 tw32_tx_mbox(tnapi->prodmbox, entry);
8154 return NETDEV_TX_OK;
8157 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8158 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8160 dev_kfree_skb_any(skb);
8163 return NETDEV_TX_OK;
8166 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8169 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8170 MAC_MODE_PORT_MODE_MASK);
8172 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8174 if (!tg3_flag(tp, 5705_PLUS))
8175 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8177 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8178 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8180 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8182 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8184 if (tg3_flag(tp, 5705_PLUS) ||
8185 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8186 tg3_asic_rev(tp) == ASIC_REV_5700)
8187 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8190 tw32(MAC_MODE, tp->mac_mode);
8194 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8196 u32 val, bmcr, mac_mode, ptest = 0;
8198 tg3_phy_toggle_apd(tp, false);
8199 tg3_phy_toggle_automdix(tp, false);
8201 if (extlpbk && tg3_phy_set_extloopbk(tp))
8204 bmcr = BMCR_FULLDPLX;
8209 bmcr |= BMCR_SPEED100;
8213 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8215 bmcr |= BMCR_SPEED100;
8218 bmcr |= BMCR_SPEED1000;
8223 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8224 tg3_readphy(tp, MII_CTRL1000, &val);
8225 val |= CTL1000_AS_MASTER |
8226 CTL1000_ENABLE_MASTER;
8227 tg3_writephy(tp, MII_CTRL1000, val);
8229 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8230 MII_TG3_FET_PTEST_TRIM_2;
8231 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8234 bmcr |= BMCR_LOOPBACK;
8236 tg3_writephy(tp, MII_BMCR, bmcr);
8238 /* The write needs to be flushed for the FETs */
8239 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8240 tg3_readphy(tp, MII_BMCR, &bmcr);
8244 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8245 tg3_asic_rev(tp) == ASIC_REV_5785) {
8246 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8247 MII_TG3_FET_PTEST_FRC_TX_LINK |
8248 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8250 /* The write needs to be flushed for the AC131 */
8251 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8254 /* Reset to prevent losing 1st rx packet intermittently */
8255 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8256 tg3_flag(tp, 5780_CLASS)) {
8257 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8259 tw32_f(MAC_RX_MODE, tp->rx_mode);
8262 mac_mode = tp->mac_mode &
8263 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8264 if (speed == SPEED_1000)
8265 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8267 mac_mode |= MAC_MODE_PORT_MODE_MII;
8269 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8270 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8272 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8273 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8274 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8275 mac_mode |= MAC_MODE_LINK_POLARITY;
8277 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8278 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8281 tw32(MAC_MODE, mac_mode);
8287 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8289 struct tg3 *tp = netdev_priv(dev);
8291 if (features & NETIF_F_LOOPBACK) {
8292 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8295 spin_lock_bh(&tp->lock);
8296 tg3_mac_loopback(tp, true);
8297 netif_carrier_on(tp->dev);
8298 spin_unlock_bh(&tp->lock);
8299 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8301 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8304 spin_lock_bh(&tp->lock);
8305 tg3_mac_loopback(tp, false);
8306 /* Force link status check */
8307 tg3_setup_phy(tp, true);
8308 spin_unlock_bh(&tp->lock);
8309 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8313 static netdev_features_t tg3_fix_features(struct net_device *dev,
8314 netdev_features_t features)
8316 struct tg3 *tp = netdev_priv(dev);
8318 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8319 features &= ~NETIF_F_ALL_TSO;
8324 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8326 netdev_features_t changed = dev->features ^ features;
8328 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8329 tg3_set_loopback(dev, features);
8334 static void tg3_rx_prodring_free(struct tg3 *tp,
8335 struct tg3_rx_prodring_set *tpr)
8339 if (tpr != &tp->napi[0].prodring) {
8340 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8341 i = (i + 1) & tp->rx_std_ring_mask)
8342 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8345 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8346 for (i = tpr->rx_jmb_cons_idx;
8347 i != tpr->rx_jmb_prod_idx;
8348 i = (i + 1) & tp->rx_jmb_ring_mask) {
8349 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8357 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8358 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8361 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8362 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8363 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8368 /* Initialize rx rings for packet processing.
8370 * The chip has been shut down and the driver detached from
8371 * the networking, so no interrupts or new tx packets will
8372 * end up in the driver. tp->{tx,}lock are held and thus
8375 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8376 struct tg3_rx_prodring_set *tpr)
8378 u32 i, rx_pkt_dma_sz;
8380 tpr->rx_std_cons_idx = 0;
8381 tpr->rx_std_prod_idx = 0;
8382 tpr->rx_jmb_cons_idx = 0;
8383 tpr->rx_jmb_prod_idx = 0;
8385 if (tpr != &tp->napi[0].prodring) {
8386 memset(&tpr->rx_std_buffers[0], 0,
8387 TG3_RX_STD_BUFF_RING_SIZE(tp));
8388 if (tpr->rx_jmb_buffers)
8389 memset(&tpr->rx_jmb_buffers[0], 0,
8390 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8394 /* Zero out all descriptors. */
8395 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8397 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8398 if (tg3_flag(tp, 5780_CLASS) &&
8399 tp->dev->mtu > ETH_DATA_LEN)
8400 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8401 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8403 /* Initialize invariants of the rings, we only set this
8404 * stuff once. This works because the card does not
8405 * write into the rx buffer posting rings.
8407 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8408 struct tg3_rx_buffer_desc *rxd;
8410 rxd = &tpr->rx_std[i];
8411 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8412 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8413 rxd->opaque = (RXD_OPAQUE_RING_STD |
8414 (i << RXD_OPAQUE_INDEX_SHIFT));
8417 /* Now allocate fresh SKBs for each rx ring. */
8418 for (i = 0; i < tp->rx_pending; i++) {
8419 unsigned int frag_size;
8421 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8423 netdev_warn(tp->dev,
8424 "Using a smaller RX standard ring. Only "
8425 "%d out of %d buffers were allocated "
8426 "successfully\n", i, tp->rx_pending);
8434 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8437 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8439 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8442 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8443 struct tg3_rx_buffer_desc *rxd;
8445 rxd = &tpr->rx_jmb[i].std;
8446 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8447 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8449 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8450 (i << RXD_OPAQUE_INDEX_SHIFT));
8453 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8454 unsigned int frag_size;
8456 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8458 netdev_warn(tp->dev,
8459 "Using a smaller RX jumbo ring. Only %d "
8460 "out of %d buffers were allocated "
8461 "successfully\n", i, tp->rx_jumbo_pending);
8464 tp->rx_jumbo_pending = i;
8473 tg3_rx_prodring_free(tp, tpr);
8477 static void tg3_rx_prodring_fini(struct tg3 *tp,
8478 struct tg3_rx_prodring_set *tpr)
8480 kfree(tpr->rx_std_buffers);
8481 tpr->rx_std_buffers = NULL;
8482 kfree(tpr->rx_jmb_buffers);
8483 tpr->rx_jmb_buffers = NULL;
8485 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8486 tpr->rx_std, tpr->rx_std_mapping);
8490 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8491 tpr->rx_jmb, tpr->rx_jmb_mapping);
8496 static int tg3_rx_prodring_init(struct tg3 *tp,
8497 struct tg3_rx_prodring_set *tpr)
8499 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8501 if (!tpr->rx_std_buffers)
8504 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8505 TG3_RX_STD_RING_BYTES(tp),
8506 &tpr->rx_std_mapping,
8511 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8512 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8514 if (!tpr->rx_jmb_buffers)
8517 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8518 TG3_RX_JMB_RING_BYTES(tp),
8519 &tpr->rx_jmb_mapping,
8528 tg3_rx_prodring_fini(tp, tpr);
8532 /* Free up pending packets in all rx/tx rings.
8534 * The chip has been shut down and the driver detached from
8535 * the networking, so no interrupts or new tx packets will
8536 * end up in the driver. tp->{tx,}lock is not held and we are not
8537 * in an interrupt context and thus may sleep.
8539 static void tg3_free_rings(struct tg3 *tp)
8543 for (j = 0; j < tp->irq_cnt; j++) {
8544 struct tg3_napi *tnapi = &tp->napi[j];
8546 tg3_rx_prodring_free(tp, &tnapi->prodring);
8548 if (!tnapi->tx_buffers)
8551 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8552 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8557 tg3_tx_skb_unmap(tnapi, i,
8558 skb_shinfo(skb)->nr_frags - 1);
8560 dev_consume_skb_any(skb);
8562 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8566 /* Initialize tx/rx rings for packet processing.
8568 * The chip has been shut down and the driver detached from
8569 * the networking, so no interrupts or new tx packets will
8570 * end up in the driver. tp->{tx,}lock are held and thus
8573 static int tg3_init_rings(struct tg3 *tp)
8577 /* Free up all the SKBs. */
8580 for (i = 0; i < tp->irq_cnt; i++) {
8581 struct tg3_napi *tnapi = &tp->napi[i];
8583 tnapi->last_tag = 0;
8584 tnapi->last_irq_tag = 0;
8585 tnapi->hw_status->status = 0;
8586 tnapi->hw_status->status_tag = 0;
8587 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8592 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8594 tnapi->rx_rcb_ptr = 0;
8596 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8598 if (tnapi->prodring.rx_std &&
8599 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8608 static void tg3_mem_tx_release(struct tg3 *tp)
8612 for (i = 0; i < tp->irq_max; i++) {
8613 struct tg3_napi *tnapi = &tp->napi[i];
8615 if (tnapi->tx_ring) {
8616 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8617 tnapi->tx_ring, tnapi->tx_desc_mapping);
8618 tnapi->tx_ring = NULL;
8621 kfree(tnapi->tx_buffers);
8622 tnapi->tx_buffers = NULL;
8626 static int tg3_mem_tx_acquire(struct tg3 *tp)
8629 struct tg3_napi *tnapi = &tp->napi[0];
8631 /* If multivector TSS is enabled, vector 0 does not handle
8632 * tx interrupts. Don't allocate any resources for it.
8634 if (tg3_flag(tp, ENABLE_TSS))
8637 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8638 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8639 sizeof(struct tg3_tx_ring_info),
8641 if (!tnapi->tx_buffers)
8644 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8646 &tnapi->tx_desc_mapping,
8648 if (!tnapi->tx_ring)
8655 tg3_mem_tx_release(tp);
8659 static void tg3_mem_rx_release(struct tg3 *tp)
8663 for (i = 0; i < tp->irq_max; i++) {
8664 struct tg3_napi *tnapi = &tp->napi[i];
8666 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8671 dma_free_coherent(&tp->pdev->dev,
8672 TG3_RX_RCB_RING_BYTES(tp),
8674 tnapi->rx_rcb_mapping);
8675 tnapi->rx_rcb = NULL;
8679 static int tg3_mem_rx_acquire(struct tg3 *tp)
8681 unsigned int i, limit;
8683 limit = tp->rxq_cnt;
8685 /* If RSS is enabled, we need a (dummy) producer ring
8686 * set on vector zero. This is the true hw prodring.
8688 if (tg3_flag(tp, ENABLE_RSS))
8691 for (i = 0; i < limit; i++) {
8692 struct tg3_napi *tnapi = &tp->napi[i];
8694 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8697 /* If multivector RSS is enabled, vector 0
8698 * does not handle rx or tx interrupts.
8699 * Don't allocate any resources for it.
8701 if (!i && tg3_flag(tp, ENABLE_RSS))
8704 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8705 TG3_RX_RCB_RING_BYTES(tp),
8706 &tnapi->rx_rcb_mapping,
8715 tg3_mem_rx_release(tp);
8720 * Must not be invoked with interrupt sources disabled and
8721 * the hardware shutdown down.
8723 static void tg3_free_consistent(struct tg3 *tp)
8727 for (i = 0; i < tp->irq_cnt; i++) {
8728 struct tg3_napi *tnapi = &tp->napi[i];
8730 if (tnapi->hw_status) {
8731 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8733 tnapi->status_mapping);
8734 tnapi->hw_status = NULL;
8738 tg3_mem_rx_release(tp);
8739 tg3_mem_tx_release(tp);
8741 /* tp->hw_stats can be referenced safely:
8742 * 1. under rtnl_lock
8743 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8746 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8747 tp->hw_stats, tp->stats_mapping);
8748 tp->hw_stats = NULL;
8753 * Must not be invoked with interrupt sources disabled and
8754 * the hardware shutdown down. Can sleep.
8756 static int tg3_alloc_consistent(struct tg3 *tp)
8760 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8761 sizeof(struct tg3_hw_stats),
8762 &tp->stats_mapping, GFP_KERNEL);
8766 for (i = 0; i < tp->irq_cnt; i++) {
8767 struct tg3_napi *tnapi = &tp->napi[i];
8768 struct tg3_hw_status *sblk;
8770 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8772 &tnapi->status_mapping,
8774 if (!tnapi->hw_status)
8777 sblk = tnapi->hw_status;
8779 if (tg3_flag(tp, ENABLE_RSS)) {
8780 u16 *prodptr = NULL;
8783 * When RSS is enabled, the status block format changes
8784 * slightly. The "rx_jumbo_consumer", "reserved",
8785 * and "rx_mini_consumer" members get mapped to the
8786 * other three rx return ring producer indexes.
8790 prodptr = &sblk->idx[0].rx_producer;
8793 prodptr = &sblk->rx_jumbo_consumer;
8796 prodptr = &sblk->reserved;
8799 prodptr = &sblk->rx_mini_consumer;
8802 tnapi->rx_rcb_prod_idx = prodptr;
8804 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8808 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8814 tg3_free_consistent(tp);
8818 #define MAX_WAIT_CNT 1000
8820 /* To stop a block, clear the enable bit and poll till it
8821 * clears. tp->lock is held.
8823 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8828 if (tg3_flag(tp, 5705_PLUS)) {
8835 /* We can't enable/disable these bits of the
8836 * 5705/5750, just say success.
8849 for (i = 0; i < MAX_WAIT_CNT; i++) {
8850 if (pci_channel_offline(tp->pdev)) {
8851 dev_err(&tp->pdev->dev,
8852 "tg3_stop_block device offline, "
8853 "ofs=%lx enable_bit=%x\n",
8860 if ((val & enable_bit) == 0)
8864 if (i == MAX_WAIT_CNT && !silent) {
8865 dev_err(&tp->pdev->dev,
8866 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8874 /* tp->lock is held. */
8875 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8879 tg3_disable_ints(tp);
8881 if (pci_channel_offline(tp->pdev)) {
8882 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8883 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8888 tp->rx_mode &= ~RX_MODE_ENABLE;
8889 tw32_f(MAC_RX_MODE, tp->rx_mode);
8892 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8893 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8894 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8895 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8896 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8897 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8899 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8900 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8901 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8902 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8903 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8904 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8905 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8907 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8908 tw32_f(MAC_MODE, tp->mac_mode);
8911 tp->tx_mode &= ~TX_MODE_ENABLE;
8912 tw32_f(MAC_TX_MODE, tp->tx_mode);
8914 for (i = 0; i < MAX_WAIT_CNT; i++) {
8916 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8919 if (i >= MAX_WAIT_CNT) {
8920 dev_err(&tp->pdev->dev,
8921 "%s timed out, TX_MODE_ENABLE will not clear "
8922 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8926 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8927 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8928 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8930 tw32(FTQ_RESET, 0xffffffff);
8931 tw32(FTQ_RESET, 0x00000000);
8933 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8934 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8937 for (i = 0; i < tp->irq_cnt; i++) {
8938 struct tg3_napi *tnapi = &tp->napi[i];
8939 if (tnapi->hw_status)
8940 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8946 /* Save PCI command register before chip reset */
8947 static void tg3_save_pci_state(struct tg3 *tp)
8949 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8952 /* Restore PCI state after chip reset */
8953 static void tg3_restore_pci_state(struct tg3 *tp)
8957 /* Re-enable indirect register accesses. */
8958 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8959 tp->misc_host_ctrl);
8961 /* Set MAX PCI retry to zero. */
8962 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8963 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8964 tg3_flag(tp, PCIX_MODE))
8965 val |= PCISTATE_RETRY_SAME_DMA;
8966 /* Allow reads and writes to the APE register and memory space. */
8967 if (tg3_flag(tp, ENABLE_APE))
8968 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8969 PCISTATE_ALLOW_APE_SHMEM_WR |
8970 PCISTATE_ALLOW_APE_PSPACE_WR;
8971 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8973 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8975 if (!tg3_flag(tp, PCI_EXPRESS)) {
8976 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8977 tp->pci_cacheline_sz);
8978 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8982 /* Make sure PCI-X relaxed ordering bit is clear. */
8983 if (tg3_flag(tp, PCIX_MODE)) {
8986 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8988 pcix_cmd &= ~PCI_X_CMD_ERO;
8989 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8993 if (tg3_flag(tp, 5780_CLASS)) {
8995 /* Chip reset on 5780 will reset MSI enable bit,
8996 * so need to restore it.
8998 if (tg3_flag(tp, USING_MSI)) {
9001 pci_read_config_word(tp->pdev,
9002 tp->msi_cap + PCI_MSI_FLAGS,
9004 pci_write_config_word(tp->pdev,
9005 tp->msi_cap + PCI_MSI_FLAGS,
9006 ctrl | PCI_MSI_FLAGS_ENABLE);
9007 val = tr32(MSGINT_MODE);
9008 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9013 static void tg3_override_clk(struct tg3 *tp)
9017 switch (tg3_asic_rev(tp)) {
9019 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9020 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9021 TG3_CPMU_MAC_ORIDE_ENABLE);
9026 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9034 static void tg3_restore_clk(struct tg3 *tp)
9038 switch (tg3_asic_rev(tp)) {
9040 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9041 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9042 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9047 val = tr32(TG3_CPMU_CLCK_ORIDE);
9048 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9056 /* tp->lock is held. */
9057 static int tg3_chip_reset(struct tg3 *tp)
9058 __releases(tp->lock)
9059 __acquires(tp->lock)
9062 void (*write_op)(struct tg3 *, u32, u32);
9065 if (!pci_device_is_present(tp->pdev))
9070 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9072 /* No matching tg3_nvram_unlock() after this because
9073 * chip reset below will undo the nvram lock.
9075 tp->nvram_lock_cnt = 0;
9077 /* GRC_MISC_CFG core clock reset will clear the memory
9078 * enable bit in PCI register 4 and the MSI enable bit
9079 * on some chips, so we save relevant registers here.
9081 tg3_save_pci_state(tp);
9083 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9084 tg3_flag(tp, 5755_PLUS))
9085 tw32(GRC_FASTBOOT_PC, 0);
9088 * We must avoid the readl() that normally takes place.
9089 * It locks machines, causes machine checks, and other
9090 * fun things. So, temporarily disable the 5701
9091 * hardware workaround, while we do the reset.
9093 write_op = tp->write32;
9094 if (write_op == tg3_write_flush_reg32)
9095 tp->write32 = tg3_write32;
9097 /* Prevent the irq handler from reading or writing PCI registers
9098 * during chip reset when the memory enable bit in the PCI command
9099 * register may be cleared. The chip does not generate interrupt
9100 * at this time, but the irq handler may still be called due to irq
9101 * sharing or irqpoll.
9103 tg3_flag_set(tp, CHIP_RESETTING);
9104 for (i = 0; i < tp->irq_cnt; i++) {
9105 struct tg3_napi *tnapi = &tp->napi[i];
9106 if (tnapi->hw_status) {
9107 tnapi->hw_status->status = 0;
9108 tnapi->hw_status->status_tag = 0;
9110 tnapi->last_tag = 0;
9111 tnapi->last_irq_tag = 0;
9115 tg3_full_unlock(tp);
9117 for (i = 0; i < tp->irq_cnt; i++)
9118 synchronize_irq(tp->napi[i].irq_vec);
9120 tg3_full_lock(tp, 0);
9122 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9123 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9124 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9128 val = GRC_MISC_CFG_CORECLK_RESET;
9130 if (tg3_flag(tp, PCI_EXPRESS)) {
9131 /* Force PCIe 1.0a mode */
9132 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9133 !tg3_flag(tp, 57765_PLUS) &&
9134 tr32(TG3_PCIE_PHY_TSTCTL) ==
9135 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9136 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9138 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9139 tw32(GRC_MISC_CFG, (1 << 29));
9144 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9145 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9146 tw32(GRC_VCPU_EXT_CTRL,
9147 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9150 /* Set the clock to the highest frequency to avoid timeouts. With link
9151 * aware mode, the clock speed could be slow and bootcode does not
9152 * complete within the expected time. Override the clock to allow the
9153 * bootcode to finish sooner and then restore it.
9155 tg3_override_clk(tp);
9157 /* Manage gphy power for all CPMU absent PCIe devices. */
9158 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9159 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9161 tw32(GRC_MISC_CFG, val);
9163 /* restore 5701 hardware bug workaround write method */
9164 tp->write32 = write_op;
9166 /* Unfortunately, we have to delay before the PCI read back.
9167 * Some 575X chips even will not respond to a PCI cfg access
9168 * when the reset command is given to the chip.
9170 * How do these hardware designers expect things to work
9171 * properly if the PCI write is posted for a long period
9172 * of time? It is always necessary to have some method by
9173 * which a register read back can occur to push the write
9174 * out which does the reset.
9176 * For most tg3 variants the trick below was working.
9181 /* Flush PCI posted writes. The normal MMIO registers
9182 * are inaccessible at this time so this is the only
9183 * way to make this reliably (actually, this is no longer
9184 * the case, see above). I tried to use indirect
9185 * register read/write but this upset some 5701 variants.
9187 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9191 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9194 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9198 /* Wait for link training to complete. */
9199 for (j = 0; j < 5000; j++)
9202 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9203 pci_write_config_dword(tp->pdev, 0xc4,
9204 cfg_val | (1 << 15));
9207 /* Clear the "no snoop" and "relaxed ordering" bits. */
9208 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9210 * Older PCIe devices only support the 128 byte
9211 * MPS setting. Enforce the restriction.
9213 if (!tg3_flag(tp, CPMU_PRESENT))
9214 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9215 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9217 /* Clear error status */
9218 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9219 PCI_EXP_DEVSTA_CED |
9220 PCI_EXP_DEVSTA_NFED |
9221 PCI_EXP_DEVSTA_FED |
9222 PCI_EXP_DEVSTA_URD);
9225 tg3_restore_pci_state(tp);
9227 tg3_flag_clear(tp, CHIP_RESETTING);
9228 tg3_flag_clear(tp, ERROR_PROCESSED);
9231 if (tg3_flag(tp, 5780_CLASS))
9232 val = tr32(MEMARB_MODE);
9233 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9235 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9237 tw32(0x5000, 0x400);
9240 if (tg3_flag(tp, IS_SSB_CORE)) {
9242 * BCM4785: In order to avoid repercussions from using
9243 * potentially defective internal ROM, stop the Rx RISC CPU,
9244 * which is not required.
9247 tg3_halt_cpu(tp, RX_CPU_BASE);
9250 err = tg3_poll_fw(tp);
9254 tw32(GRC_MODE, tp->grc_mode);
9256 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9259 tw32(0xc4, val | (1 << 15));
9262 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9263 tg3_asic_rev(tp) == ASIC_REV_5705) {
9264 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9265 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9266 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9267 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9270 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9271 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9273 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9274 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9279 tw32_f(MAC_MODE, val);
9282 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9286 if (tg3_flag(tp, PCI_EXPRESS) &&
9287 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9288 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9289 !tg3_flag(tp, 57765_PLUS)) {
9292 tw32(0x7c00, val | (1 << 25));
9295 tg3_restore_clk(tp);
9297 /* Increase the core clock speed to fix tx timeout issue for 5762
9298 * with 100Mbps link speed.
9300 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9301 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9302 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9303 TG3_CPMU_MAC_ORIDE_ENABLE);
9306 /* Reprobe ASF enable state. */
9307 tg3_flag_clear(tp, ENABLE_ASF);
9308 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9309 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9311 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9312 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9313 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9316 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9317 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9318 tg3_flag_set(tp, ENABLE_ASF);
9319 tp->last_event_jiffies = jiffies;
9320 if (tg3_flag(tp, 5750_PLUS))
9321 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9323 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9324 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9325 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9326 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9327 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9334 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9335 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9336 static void __tg3_set_rx_mode(struct net_device *);
9338 /* tp->lock is held. */
9339 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9345 tg3_write_sig_pre_reset(tp, kind);
9347 tg3_abort_hw(tp, silent);
9348 err = tg3_chip_reset(tp);
9350 __tg3_set_mac_addr(tp, false);
9352 tg3_write_sig_legacy(tp, kind);
9353 tg3_write_sig_post_reset(tp, kind);
9356 /* Save the stats across chip resets... */
9357 tg3_get_nstats(tp, &tp->net_stats_prev);
9358 tg3_get_estats(tp, &tp->estats_prev);
9360 /* And make sure the next sample is new data */
9361 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9367 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9369 struct tg3 *tp = netdev_priv(dev);
9370 struct sockaddr *addr = p;
9372 bool skip_mac_1 = false;
9374 if (!is_valid_ether_addr(addr->sa_data))
9375 return -EADDRNOTAVAIL;
9377 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9379 if (!netif_running(dev))
9382 if (tg3_flag(tp, ENABLE_ASF)) {
9383 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9385 addr0_high = tr32(MAC_ADDR_0_HIGH);
9386 addr0_low = tr32(MAC_ADDR_0_LOW);
9387 addr1_high = tr32(MAC_ADDR_1_HIGH);
9388 addr1_low = tr32(MAC_ADDR_1_LOW);
9390 /* Skip MAC addr 1 if ASF is using it. */
9391 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9392 !(addr1_high == 0 && addr1_low == 0))
9395 spin_lock_bh(&tp->lock);
9396 __tg3_set_mac_addr(tp, skip_mac_1);
9397 __tg3_set_rx_mode(dev);
9398 spin_unlock_bh(&tp->lock);
9403 /* tp->lock is held. */
9404 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9405 dma_addr_t mapping, u32 maxlen_flags,
9409 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9410 ((u64) mapping >> 32));
9412 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9413 ((u64) mapping & 0xffffffff));
9415 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9418 if (!tg3_flag(tp, 5705_PLUS))
9420 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9425 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9429 if (!tg3_flag(tp, ENABLE_TSS)) {
9430 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9431 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9432 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9434 tw32(HOSTCC_TXCOL_TICKS, 0);
9435 tw32(HOSTCC_TXMAX_FRAMES, 0);
9436 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9438 for (; i < tp->txq_cnt; i++) {
9441 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9442 tw32(reg, ec->tx_coalesce_usecs);
9443 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9444 tw32(reg, ec->tx_max_coalesced_frames);
9445 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9446 tw32(reg, ec->tx_max_coalesced_frames_irq);
9450 for (; i < tp->irq_max - 1; i++) {
9451 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9452 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9453 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9457 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9460 u32 limit = tp->rxq_cnt;
9462 if (!tg3_flag(tp, ENABLE_RSS)) {
9463 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9464 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9465 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9468 tw32(HOSTCC_RXCOL_TICKS, 0);
9469 tw32(HOSTCC_RXMAX_FRAMES, 0);
9470 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9473 for (; i < limit; i++) {
9476 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9477 tw32(reg, ec->rx_coalesce_usecs);
9478 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9479 tw32(reg, ec->rx_max_coalesced_frames);
9480 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9481 tw32(reg, ec->rx_max_coalesced_frames_irq);
9484 for (; i < tp->irq_max - 1; i++) {
9485 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9486 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9487 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9491 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9493 tg3_coal_tx_init(tp, ec);
9494 tg3_coal_rx_init(tp, ec);
9496 if (!tg3_flag(tp, 5705_PLUS)) {
9497 u32 val = ec->stats_block_coalesce_usecs;
9499 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9500 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9505 tw32(HOSTCC_STAT_COAL_TICKS, val);
9509 /* tp->lock is held. */
9510 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9514 /* Disable all transmit rings but the first. */
9515 if (!tg3_flag(tp, 5705_PLUS))
9516 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9517 else if (tg3_flag(tp, 5717_PLUS))
9518 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9519 else if (tg3_flag(tp, 57765_CLASS) ||
9520 tg3_asic_rev(tp) == ASIC_REV_5762)
9521 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9523 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9525 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9526 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9527 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9528 BDINFO_FLAGS_DISABLED);
9531 /* tp->lock is held. */
9532 static void tg3_tx_rcbs_init(struct tg3 *tp)
9535 u32 txrcb = NIC_SRAM_SEND_RCB;
9537 if (tg3_flag(tp, ENABLE_TSS))
9540 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9541 struct tg3_napi *tnapi = &tp->napi[i];
9543 if (!tnapi->tx_ring)
9546 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9547 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9548 NIC_SRAM_TX_BUFFER_DESC);
9552 /* tp->lock is held. */
9553 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9557 /* Disable all receive return rings but the first. */
9558 if (tg3_flag(tp, 5717_PLUS))
9559 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9560 else if (!tg3_flag(tp, 5705_PLUS))
9561 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9562 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9563 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9564 tg3_flag(tp, 57765_CLASS))
9565 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9567 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9569 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9570 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9571 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9572 BDINFO_FLAGS_DISABLED);
9575 /* tp->lock is held. */
9576 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9579 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9581 if (tg3_flag(tp, ENABLE_RSS))
9584 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9585 struct tg3_napi *tnapi = &tp->napi[i];
9590 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9591 (tp->rx_ret_ring_mask + 1) <<
9592 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9596 /* tp->lock is held. */
9597 static void tg3_rings_reset(struct tg3 *tp)
9601 struct tg3_napi *tnapi = &tp->napi[0];
9603 tg3_tx_rcbs_disable(tp);
9605 tg3_rx_ret_rcbs_disable(tp);
9607 /* Disable interrupts */
9608 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9609 tp->napi[0].chk_msi_cnt = 0;
9610 tp->napi[0].last_rx_cons = 0;
9611 tp->napi[0].last_tx_cons = 0;
9613 /* Zero mailbox registers. */
9614 if (tg3_flag(tp, SUPPORT_MSIX)) {
9615 for (i = 1; i < tp->irq_max; i++) {
9616 tp->napi[i].tx_prod = 0;
9617 tp->napi[i].tx_cons = 0;
9618 if (tg3_flag(tp, ENABLE_TSS))
9619 tw32_mailbox(tp->napi[i].prodmbox, 0);
9620 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9621 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9622 tp->napi[i].chk_msi_cnt = 0;
9623 tp->napi[i].last_rx_cons = 0;
9624 tp->napi[i].last_tx_cons = 0;
9626 if (!tg3_flag(tp, ENABLE_TSS))
9627 tw32_mailbox(tp->napi[0].prodmbox, 0);
9629 tp->napi[0].tx_prod = 0;
9630 tp->napi[0].tx_cons = 0;
9631 tw32_mailbox(tp->napi[0].prodmbox, 0);
9632 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9635 /* Make sure the NIC-based send BD rings are disabled. */
9636 if (!tg3_flag(tp, 5705_PLUS)) {
9637 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9638 for (i = 0; i < 16; i++)
9639 tw32_tx_mbox(mbox + i * 8, 0);
9642 /* Clear status block in ram. */
9643 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9645 /* Set status block DMA address */
9646 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9647 ((u64) tnapi->status_mapping >> 32));
9648 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9649 ((u64) tnapi->status_mapping & 0xffffffff));
9651 stblk = HOSTCC_STATBLCK_RING1;
9653 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9654 u64 mapping = (u64)tnapi->status_mapping;
9655 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9656 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9659 /* Clear status block in ram. */
9660 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9663 tg3_tx_rcbs_init(tp);
9664 tg3_rx_ret_rcbs_init(tp);
9667 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9669 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9671 if (!tg3_flag(tp, 5750_PLUS) ||
9672 tg3_flag(tp, 5780_CLASS) ||
9673 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9674 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9675 tg3_flag(tp, 57765_PLUS))
9676 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9677 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9678 tg3_asic_rev(tp) == ASIC_REV_5787)
9679 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9681 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9683 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9684 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9686 val = min(nic_rep_thresh, host_rep_thresh);
9687 tw32(RCVBDI_STD_THRESH, val);
9689 if (tg3_flag(tp, 57765_PLUS))
9690 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9692 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9695 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9697 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9699 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9700 tw32(RCVBDI_JUMBO_THRESH, val);
9702 if (tg3_flag(tp, 57765_PLUS))
9703 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9706 static inline u32 calc_crc(unsigned char *buf, int len)
9714 for (j = 0; j < len; j++) {
9717 for (k = 0; k < 8; k++) {
9730 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9732 /* accept or reject all multicast frames */
9733 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9734 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9735 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9736 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9739 static void __tg3_set_rx_mode(struct net_device *dev)
9741 struct tg3 *tp = netdev_priv(dev);
9744 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9745 RX_MODE_KEEP_VLAN_TAG);
9747 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9748 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9751 if (!tg3_flag(tp, ENABLE_ASF))
9752 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9755 if (dev->flags & IFF_PROMISC) {
9756 /* Promiscuous mode. */
9757 rx_mode |= RX_MODE_PROMISC;
9758 } else if (dev->flags & IFF_ALLMULTI) {
9759 /* Accept all multicast. */
9760 tg3_set_multi(tp, 1);
9761 } else if (netdev_mc_empty(dev)) {
9762 /* Reject all multicast. */
9763 tg3_set_multi(tp, 0);
9765 /* Accept one or more multicast(s). */
9766 struct netdev_hw_addr *ha;
9767 u32 mc_filter[4] = { 0, };
9772 netdev_for_each_mc_addr(ha, dev) {
9773 crc = calc_crc(ha->addr, ETH_ALEN);
9775 regidx = (bit & 0x60) >> 5;
9777 mc_filter[regidx] |= (1 << bit);
9780 tw32(MAC_HASH_REG_0, mc_filter[0]);
9781 tw32(MAC_HASH_REG_1, mc_filter[1]);
9782 tw32(MAC_HASH_REG_2, mc_filter[2]);
9783 tw32(MAC_HASH_REG_3, mc_filter[3]);
9786 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9787 rx_mode |= RX_MODE_PROMISC;
9788 } else if (!(dev->flags & IFF_PROMISC)) {
9789 /* Add all entries into to the mac addr filter list */
9791 struct netdev_hw_addr *ha;
9793 netdev_for_each_uc_addr(ha, dev) {
9794 __tg3_set_one_mac_addr(tp, ha->addr,
9795 i + TG3_UCAST_ADDR_IDX(tp));
9800 if (rx_mode != tp->rx_mode) {
9801 tp->rx_mode = rx_mode;
9802 tw32_f(MAC_RX_MODE, rx_mode);
9807 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9811 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9812 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9815 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9819 if (!tg3_flag(tp, SUPPORT_MSIX))
9822 if (tp->rxq_cnt == 1) {
9823 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9827 /* Validate table against current IRQ count */
9828 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9829 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9833 if (i != TG3_RSS_INDIR_TBL_SIZE)
9834 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9837 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9840 u32 reg = MAC_RSS_INDIR_TBL_0;
9842 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9843 u32 val = tp->rss_ind_tbl[i];
9845 for (; i % 8; i++) {
9847 val |= tp->rss_ind_tbl[i];
9854 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9856 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9857 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9859 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9862 /* tp->lock is held. */
9863 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9865 u32 val, rdmac_mode;
9867 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9869 tg3_disable_ints(tp);
9873 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9875 if (tg3_flag(tp, INIT_COMPLETE))
9876 tg3_abort_hw(tp, 1);
9878 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9879 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9880 tg3_phy_pull_config(tp);
9881 tg3_eee_pull_config(tp, NULL);
9882 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9885 /* Enable MAC control of LPI */
9886 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9892 err = tg3_chip_reset(tp);
9896 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9898 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9899 val = tr32(TG3_CPMU_CTRL);
9900 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9901 tw32(TG3_CPMU_CTRL, val);
9903 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9904 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9905 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9906 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9908 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9909 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9910 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9911 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9913 val = tr32(TG3_CPMU_HST_ACC);
9914 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9915 val |= CPMU_HST_ACC_MACCLK_6_25;
9916 tw32(TG3_CPMU_HST_ACC, val);
9919 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9920 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9921 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9922 PCIE_PWR_MGMT_L1_THRESH_4MS;
9923 tw32(PCIE_PWR_MGMT_THRESH, val);
9925 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9926 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9928 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9930 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9931 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9934 if (tg3_flag(tp, L1PLLPD_EN)) {
9935 u32 grc_mode = tr32(GRC_MODE);
9937 /* Access the lower 1K of PL PCIE block registers. */
9938 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9939 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9941 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9942 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9943 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9945 tw32(GRC_MODE, grc_mode);
9948 if (tg3_flag(tp, 57765_CLASS)) {
9949 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9950 u32 grc_mode = tr32(GRC_MODE);
9952 /* Access the lower 1K of PL PCIE block registers. */
9953 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9954 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9956 val = tr32(TG3_PCIE_TLDLPL_PORT +
9957 TG3_PCIE_PL_LO_PHYCTL5);
9958 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9959 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9961 tw32(GRC_MODE, grc_mode);
9964 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9967 /* Fix transmit hangs */
9968 val = tr32(TG3_CPMU_PADRNG_CTL);
9969 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9970 tw32(TG3_CPMU_PADRNG_CTL, val);
9972 grc_mode = tr32(GRC_MODE);
9974 /* Access the lower 1K of DL PCIE block registers. */
9975 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9976 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9978 val = tr32(TG3_PCIE_TLDLPL_PORT +
9979 TG3_PCIE_DL_LO_FTSMAX);
9980 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9981 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9982 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9984 tw32(GRC_MODE, grc_mode);
9987 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9988 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9989 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9990 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9993 /* This works around an issue with Athlon chipsets on
9994 * B3 tigon3 silicon. This bit has no effect on any
9995 * other revision. But do not set this on PCI Express
9996 * chips and don't even touch the clocks if the CPMU is present.
9998 if (!tg3_flag(tp, CPMU_PRESENT)) {
9999 if (!tg3_flag(tp, PCI_EXPRESS))
10000 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10001 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10004 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10005 tg3_flag(tp, PCIX_MODE)) {
10006 val = tr32(TG3PCI_PCISTATE);
10007 val |= PCISTATE_RETRY_SAME_DMA;
10008 tw32(TG3PCI_PCISTATE, val);
10011 if (tg3_flag(tp, ENABLE_APE)) {
10012 /* Allow reads and writes to the
10013 * APE register and memory space.
10015 val = tr32(TG3PCI_PCISTATE);
10016 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10017 PCISTATE_ALLOW_APE_SHMEM_WR |
10018 PCISTATE_ALLOW_APE_PSPACE_WR;
10019 tw32(TG3PCI_PCISTATE, val);
10022 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10023 /* Enable some hw fixes. */
10024 val = tr32(TG3PCI_MSI_DATA);
10025 val |= (1 << 26) | (1 << 28) | (1 << 29);
10026 tw32(TG3PCI_MSI_DATA, val);
10029 /* Descriptor ring init may make accesses to the
10030 * NIC SRAM area to setup the TX descriptors, so we
10031 * can only do this after the hardware has been
10032 * successfully reset.
10034 err = tg3_init_rings(tp);
10038 if (tg3_flag(tp, 57765_PLUS)) {
10039 val = tr32(TG3PCI_DMA_RW_CTRL) &
10040 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10041 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10042 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10043 if (!tg3_flag(tp, 57765_CLASS) &&
10044 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10045 tg3_asic_rev(tp) != ASIC_REV_5762)
10046 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10047 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10048 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10049 tg3_asic_rev(tp) != ASIC_REV_5761) {
10050 /* This value is determined during the probe time DMA
10051 * engine test, tg3_test_dma.
10053 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10056 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10057 GRC_MODE_4X_NIC_SEND_RINGS |
10058 GRC_MODE_NO_TX_PHDR_CSUM |
10059 GRC_MODE_NO_RX_PHDR_CSUM);
10060 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10062 /* Pseudo-header checksum is done by hardware logic and not
10063 * the offload processers, so make the chip do the pseudo-
10064 * header checksums on receive. For transmit it is more
10065 * convenient to do the pseudo-header checksum in software
10066 * as Linux does that on transmit for us in all cases.
10068 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10070 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10072 tw32(TG3_RX_PTP_CTL,
10073 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10075 if (tg3_flag(tp, PTP_CAPABLE))
10076 val |= GRC_MODE_TIME_SYNC_ENABLE;
10078 tw32(GRC_MODE, tp->grc_mode | val);
10080 /* On one of the AMD platform, MRRS is restricted to 4000 because of
10081 * south bridge limitation. As a workaround, Driver is setting MRRS
10082 * to 2048 instead of default 4096.
10084 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10085 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10086 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10087 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10090 /* Setup the timer prescalar register. Clock is always 66Mhz. */
10091 val = tr32(GRC_MISC_CFG);
10093 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10094 tw32(GRC_MISC_CFG, val);
10096 /* Initialize MBUF/DESC pool. */
10097 if (tg3_flag(tp, 5750_PLUS)) {
10099 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10100 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10101 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10102 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10104 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10105 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10106 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10107 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10110 fw_len = tp->fw_len;
10111 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10112 tw32(BUFMGR_MB_POOL_ADDR,
10113 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10114 tw32(BUFMGR_MB_POOL_SIZE,
10115 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10118 if (tp->dev->mtu <= ETH_DATA_LEN) {
10119 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10120 tp->bufmgr_config.mbuf_read_dma_low_water);
10121 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10122 tp->bufmgr_config.mbuf_mac_rx_low_water);
10123 tw32(BUFMGR_MB_HIGH_WATER,
10124 tp->bufmgr_config.mbuf_high_water);
10126 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10127 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10128 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10129 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10130 tw32(BUFMGR_MB_HIGH_WATER,
10131 tp->bufmgr_config.mbuf_high_water_jumbo);
10133 tw32(BUFMGR_DMA_LOW_WATER,
10134 tp->bufmgr_config.dma_low_water);
10135 tw32(BUFMGR_DMA_HIGH_WATER,
10136 tp->bufmgr_config.dma_high_water);
10138 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10139 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10140 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10141 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10142 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10143 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10144 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10145 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10146 tw32(BUFMGR_MODE, val);
10147 for (i = 0; i < 2000; i++) {
10148 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10153 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10157 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10158 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10160 tg3_setup_rxbd_thresholds(tp);
10162 /* Initialize TG3_BDINFO's at:
10163 * RCVDBDI_STD_BD: standard eth size rx ring
10164 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10165 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10168 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10169 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10170 * ring attribute flags
10171 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10173 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10174 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10176 * The size of each ring is fixed in the firmware, but the location is
10179 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10180 ((u64) tpr->rx_std_mapping >> 32));
10181 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10182 ((u64) tpr->rx_std_mapping & 0xffffffff));
10183 if (!tg3_flag(tp, 5717_PLUS))
10184 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10185 NIC_SRAM_RX_BUFFER_DESC);
10187 /* Disable the mini ring */
10188 if (!tg3_flag(tp, 5705_PLUS))
10189 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10190 BDINFO_FLAGS_DISABLED);
10192 /* Program the jumbo buffer descriptor ring control
10193 * blocks on those devices that have them.
10195 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10196 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10198 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10199 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10200 ((u64) tpr->rx_jmb_mapping >> 32));
10201 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10202 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10203 val = TG3_RX_JMB_RING_SIZE(tp) <<
10204 BDINFO_FLAGS_MAXLEN_SHIFT;
10205 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10206 val | BDINFO_FLAGS_USE_EXT_RECV);
10207 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10208 tg3_flag(tp, 57765_CLASS) ||
10209 tg3_asic_rev(tp) == ASIC_REV_5762)
10210 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10211 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10213 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10214 BDINFO_FLAGS_DISABLED);
10217 if (tg3_flag(tp, 57765_PLUS)) {
10218 val = TG3_RX_STD_RING_SIZE(tp);
10219 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10220 val |= (TG3_RX_STD_DMA_SZ << 2);
10222 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10224 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10226 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10228 tpr->rx_std_prod_idx = tp->rx_pending;
10229 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10231 tpr->rx_jmb_prod_idx =
10232 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10233 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10235 tg3_rings_reset(tp);
10237 /* Initialize MAC address and backoff seed. */
10238 __tg3_set_mac_addr(tp, false);
10240 /* MTU + ethernet header + FCS + optional VLAN tag */
10241 tw32(MAC_RX_MTU_SIZE,
10242 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10244 /* The slot time is changed by tg3_setup_phy if we
10245 * run at gigabit with half duplex.
10247 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10248 (6 << TX_LENGTHS_IPG_SHIFT) |
10249 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10251 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10252 tg3_asic_rev(tp) == ASIC_REV_5762)
10253 val |= tr32(MAC_TX_LENGTHS) &
10254 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10255 TX_LENGTHS_CNT_DWN_VAL_MSK);
10257 tw32(MAC_TX_LENGTHS, val);
10259 /* Receive rules. */
10260 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10261 tw32(RCVLPC_CONFIG, 0x0181);
10263 /* Calculate RDMAC_MODE setting early, we need it to determine
10264 * the RCVLPC_STATE_ENABLE mask.
10266 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10267 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10268 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10269 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10270 RDMAC_MODE_LNGREAD_ENAB);
10272 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10273 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10275 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10276 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10277 tg3_asic_rev(tp) == ASIC_REV_57780)
10278 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10279 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10280 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10282 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10283 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10284 if (tg3_flag(tp, TSO_CAPABLE) &&
10285 tg3_asic_rev(tp) == ASIC_REV_5705) {
10286 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10287 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10288 !tg3_flag(tp, IS_5788)) {
10289 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10293 if (tg3_flag(tp, PCI_EXPRESS))
10294 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10296 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10298 if (tp->dev->mtu <= ETH_DATA_LEN) {
10299 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10300 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10304 if (tg3_flag(tp, HW_TSO_1) ||
10305 tg3_flag(tp, HW_TSO_2) ||
10306 tg3_flag(tp, HW_TSO_3))
10307 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10309 if (tg3_flag(tp, 57765_PLUS) ||
10310 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10311 tg3_asic_rev(tp) == ASIC_REV_57780)
10312 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10314 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10315 tg3_asic_rev(tp) == ASIC_REV_5762)
10316 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10318 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10319 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10320 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10321 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10322 tg3_flag(tp, 57765_PLUS)) {
10325 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10326 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10328 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10330 val = tr32(tgtreg);
10331 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10332 tg3_asic_rev(tp) == ASIC_REV_5762) {
10333 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10334 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10335 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10336 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10337 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10338 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10340 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10343 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10344 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10345 tg3_asic_rev(tp) == ASIC_REV_5762) {
10348 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10349 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10351 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10353 val = tr32(tgtreg);
10355 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10356 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10359 /* Receive/send statistics. */
10360 if (tg3_flag(tp, 5750_PLUS)) {
10361 val = tr32(RCVLPC_STATS_ENABLE);
10362 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10363 tw32(RCVLPC_STATS_ENABLE, val);
10364 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10365 tg3_flag(tp, TSO_CAPABLE)) {
10366 val = tr32(RCVLPC_STATS_ENABLE);
10367 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10368 tw32(RCVLPC_STATS_ENABLE, val);
10370 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10372 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10373 tw32(SNDDATAI_STATSENAB, 0xffffff);
10374 tw32(SNDDATAI_STATSCTRL,
10375 (SNDDATAI_SCTRL_ENABLE |
10376 SNDDATAI_SCTRL_FASTUPD));
10378 /* Setup host coalescing engine. */
10379 tw32(HOSTCC_MODE, 0);
10380 for (i = 0; i < 2000; i++) {
10381 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10386 __tg3_set_coalesce(tp, &tp->coal);
10388 if (!tg3_flag(tp, 5705_PLUS)) {
10389 /* Status/statistics block address. See tg3_timer,
10390 * the tg3_periodic_fetch_stats call there, and
10391 * tg3_get_stats to see how this works for 5705/5750 chips.
10393 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10394 ((u64) tp->stats_mapping >> 32));
10395 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10396 ((u64) tp->stats_mapping & 0xffffffff));
10397 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10399 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10401 /* Clear statistics and status block memory areas */
10402 for (i = NIC_SRAM_STATS_BLK;
10403 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10404 i += sizeof(u32)) {
10405 tg3_write_mem(tp, i, 0);
10410 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10412 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10413 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10414 if (!tg3_flag(tp, 5705_PLUS))
10415 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10417 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10418 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10419 /* reset to prevent losing 1st rx packet intermittently */
10420 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10424 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10425 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10426 MAC_MODE_FHDE_ENABLE;
10427 if (tg3_flag(tp, ENABLE_APE))
10428 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10429 if (!tg3_flag(tp, 5705_PLUS) &&
10430 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10431 tg3_asic_rev(tp) != ASIC_REV_5700)
10432 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10433 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10436 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10437 * If TG3_FLAG_IS_NIC is zero, we should read the
10438 * register to preserve the GPIO settings for LOMs. The GPIOs,
10439 * whether used as inputs or outputs, are set by boot code after
10442 if (!tg3_flag(tp, IS_NIC)) {
10445 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10446 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10447 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10449 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10450 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10451 GRC_LCLCTRL_GPIO_OUTPUT3;
10453 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10454 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10456 tp->grc_local_ctrl &= ~gpio_mask;
10457 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10459 /* GPIO1 must be driven high for eeprom write protect */
10460 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10461 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10462 GRC_LCLCTRL_GPIO_OUTPUT1);
10464 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10467 if (tg3_flag(tp, USING_MSIX)) {
10468 val = tr32(MSGINT_MODE);
10469 val |= MSGINT_MODE_ENABLE;
10470 if (tp->irq_cnt > 1)
10471 val |= MSGINT_MODE_MULTIVEC_EN;
10472 if (!tg3_flag(tp, 1SHOT_MSI))
10473 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10474 tw32(MSGINT_MODE, val);
10477 if (!tg3_flag(tp, 5705_PLUS)) {
10478 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10482 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10483 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10484 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10485 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10486 WDMAC_MODE_LNGREAD_ENAB);
10488 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10489 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10490 if (tg3_flag(tp, TSO_CAPABLE) &&
10491 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10492 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10494 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10495 !tg3_flag(tp, IS_5788)) {
10496 val |= WDMAC_MODE_RX_ACCEL;
10500 /* Enable host coalescing bug fix */
10501 if (tg3_flag(tp, 5755_PLUS))
10502 val |= WDMAC_MODE_STATUS_TAG_FIX;
10504 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10505 val |= WDMAC_MODE_BURST_ALL_DATA;
10507 tw32_f(WDMAC_MODE, val);
10510 if (tg3_flag(tp, PCIX_MODE)) {
10513 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10515 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10516 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10517 pcix_cmd |= PCI_X_CMD_READ_2K;
10518 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10519 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10520 pcix_cmd |= PCI_X_CMD_READ_2K;
10522 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10526 tw32_f(RDMAC_MODE, rdmac_mode);
10529 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10530 tg3_asic_rev(tp) == ASIC_REV_5720) {
10531 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10532 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10535 if (i < TG3_NUM_RDMA_CHANNELS) {
10536 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10537 val |= tg3_lso_rd_dma_workaround_bit(tp);
10538 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10539 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10543 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10544 if (!tg3_flag(tp, 5705_PLUS))
10545 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10547 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10548 tw32(SNDDATAC_MODE,
10549 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10551 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10553 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10554 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10555 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10556 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10557 val |= RCVDBDI_MODE_LRG_RING_SZ;
10558 tw32(RCVDBDI_MODE, val);
10559 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10560 if (tg3_flag(tp, HW_TSO_1) ||
10561 tg3_flag(tp, HW_TSO_2) ||
10562 tg3_flag(tp, HW_TSO_3))
10563 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10564 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10565 if (tg3_flag(tp, ENABLE_TSS))
10566 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10567 tw32(SNDBDI_MODE, val);
10568 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10570 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10571 err = tg3_load_5701_a0_firmware_fix(tp);
10576 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10577 /* Ignore any errors for the firmware download. If download
10578 * fails, the device will operate with EEE disabled
10580 tg3_load_57766_firmware(tp);
10583 if (tg3_flag(tp, TSO_CAPABLE)) {
10584 err = tg3_load_tso_firmware(tp);
10589 tp->tx_mode = TX_MODE_ENABLE;
10591 if (tg3_flag(tp, 5755_PLUS) ||
10592 tg3_asic_rev(tp) == ASIC_REV_5906)
10593 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10595 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10596 tg3_asic_rev(tp) == ASIC_REV_5762) {
10597 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10598 tp->tx_mode &= ~val;
10599 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10602 tw32_f(MAC_TX_MODE, tp->tx_mode);
10605 if (tg3_flag(tp, ENABLE_RSS)) {
10608 tg3_rss_write_indir_tbl(tp);
10610 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10612 for (i = 0; i < 10 ; i++)
10613 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10616 tp->rx_mode = RX_MODE_ENABLE;
10617 if (tg3_flag(tp, 5755_PLUS))
10618 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10620 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10621 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10623 if (tg3_flag(tp, ENABLE_RSS))
10624 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10625 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10626 RX_MODE_RSS_IPV6_HASH_EN |
10627 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10628 RX_MODE_RSS_IPV4_HASH_EN |
10629 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10631 tw32_f(MAC_RX_MODE, tp->rx_mode);
10634 tw32(MAC_LED_CTRL, tp->led_ctrl);
10636 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10637 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10638 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10641 tw32_f(MAC_RX_MODE, tp->rx_mode);
10644 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10645 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10646 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10647 /* Set drive transmission level to 1.2V */
10648 /* only if the signal pre-emphasis bit is not set */
10649 val = tr32(MAC_SERDES_CFG);
10652 tw32(MAC_SERDES_CFG, val);
10654 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10655 tw32(MAC_SERDES_CFG, 0x616000);
10658 /* Prevent chip from dropping frames when flow control
10661 if (tg3_flag(tp, 57765_CLASS))
10665 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10667 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10668 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10669 /* Use hardware link auto-negotiation */
10670 tg3_flag_set(tp, HW_AUTONEG);
10673 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10674 tg3_asic_rev(tp) == ASIC_REV_5714) {
10677 tmp = tr32(SERDES_RX_CTRL);
10678 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10679 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10680 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10681 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10684 if (!tg3_flag(tp, USE_PHYLIB)) {
10685 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10686 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10688 err = tg3_setup_phy(tp, false);
10692 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10693 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10696 /* Clear CRC stats. */
10697 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10698 tg3_writephy(tp, MII_TG3_TEST1,
10699 tmp | MII_TG3_TEST1_CRC_EN);
10700 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10705 __tg3_set_rx_mode(tp->dev);
10707 /* Initialize receive rules. */
10708 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10709 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10710 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10711 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10713 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10717 if (tg3_flag(tp, ENABLE_ASF))
10721 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10723 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10725 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10727 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10729 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10731 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10733 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10735 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10737 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10739 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10741 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10743 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10745 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10747 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10755 if (tg3_flag(tp, ENABLE_APE))
10756 /* Write our heartbeat update interval to APE. */
10757 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10758 APE_HOST_HEARTBEAT_INT_5SEC);
10760 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10765 /* Called at device open time to get the chip ready for
10766 * packet processing. Invoked with tp->lock held.
10768 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10770 /* Chip may have been just powered on. If so, the boot code may still
10771 * be running initialization. Wait for it to finish to avoid races in
10772 * accessing the hardware.
10774 tg3_enable_register_access(tp);
10777 tg3_switch_clocks(tp);
10779 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10781 return tg3_reset_hw(tp, reset_phy);
10784 #ifdef CONFIG_TIGON3_HWMON
10785 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10789 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10790 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10792 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10795 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10796 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10797 memset(ocir, 0, TG3_OCIR_LEN);
10801 /* sysfs attributes for hwmon */
10802 static ssize_t tg3_show_temp(struct device *dev,
10803 struct device_attribute *devattr, char *buf)
10805 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10806 struct tg3 *tp = dev_get_drvdata(dev);
10809 spin_lock_bh(&tp->lock);
10810 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10811 sizeof(temperature));
10812 spin_unlock_bh(&tp->lock);
10813 return sprintf(buf, "%u\n", temperature * 1000);
10817 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10818 TG3_TEMP_SENSOR_OFFSET);
10819 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10820 TG3_TEMP_CAUTION_OFFSET);
10821 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10822 TG3_TEMP_MAX_OFFSET);
10824 static struct attribute *tg3_attrs[] = {
10825 &sensor_dev_attr_temp1_input.dev_attr.attr,
10826 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10827 &sensor_dev_attr_temp1_max.dev_attr.attr,
10830 ATTRIBUTE_GROUPS(tg3);
10832 static void tg3_hwmon_close(struct tg3 *tp)
10834 if (tp->hwmon_dev) {
10835 hwmon_device_unregister(tp->hwmon_dev);
10836 tp->hwmon_dev = NULL;
10840 static void tg3_hwmon_open(struct tg3 *tp)
10844 struct pci_dev *pdev = tp->pdev;
10845 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10847 tg3_sd_scan_scratchpad(tp, ocirs);
10849 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10850 if (!ocirs[i].src_data_length)
10853 size += ocirs[i].src_hdr_length;
10854 size += ocirs[i].src_data_length;
10860 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10862 if (IS_ERR(tp->hwmon_dev)) {
10863 tp->hwmon_dev = NULL;
10864 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10868 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10869 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10870 #endif /* CONFIG_TIGON3_HWMON */
10873 #define TG3_STAT_ADD32(PSTAT, REG) \
10874 do { u32 __val = tr32(REG); \
10875 (PSTAT)->low += __val; \
10876 if ((PSTAT)->low < __val) \
10877 (PSTAT)->high += 1; \
10880 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10882 struct tg3_hw_stats *sp = tp->hw_stats;
10887 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10888 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10889 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10890 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10891 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10892 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10893 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10894 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10895 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10896 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10897 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10898 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10899 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10900 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10901 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10902 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10905 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10906 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10907 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10908 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10911 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10912 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10913 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10914 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10915 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10916 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10917 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10918 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10919 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10920 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10921 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10922 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10923 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10924 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10926 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10927 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10928 tg3_asic_rev(tp) != ASIC_REV_5762 &&
10929 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10930 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10931 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10933 u32 val = tr32(HOSTCC_FLOW_ATTN);
10934 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10936 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10937 sp->rx_discards.low += val;
10938 if (sp->rx_discards.low < val)
10939 sp->rx_discards.high += 1;
10941 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10943 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10946 static void tg3_chk_missed_msi(struct tg3 *tp)
10950 for (i = 0; i < tp->irq_cnt; i++) {
10951 struct tg3_napi *tnapi = &tp->napi[i];
10953 if (tg3_has_work(tnapi)) {
10954 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10955 tnapi->last_tx_cons == tnapi->tx_cons) {
10956 if (tnapi->chk_msi_cnt < 1) {
10957 tnapi->chk_msi_cnt++;
10963 tnapi->chk_msi_cnt = 0;
10964 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10965 tnapi->last_tx_cons = tnapi->tx_cons;
10969 static void tg3_timer(struct timer_list *t)
10971 struct tg3 *tp = from_timer(tp, t, timer);
10973 spin_lock(&tp->lock);
10975 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10976 spin_unlock(&tp->lock);
10977 goto restart_timer;
10980 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10981 tg3_flag(tp, 57765_CLASS))
10982 tg3_chk_missed_msi(tp);
10984 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10985 /* BCM4785: Flush posted writes from GbE to host memory. */
10989 if (!tg3_flag(tp, TAGGED_STATUS)) {
10990 /* All of this garbage is because when using non-tagged
10991 * IRQ status the mailbox/status_block protocol the chip
10992 * uses with the cpu is race prone.
10994 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10995 tw32(GRC_LOCAL_CTRL,
10996 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10998 tw32(HOSTCC_MODE, tp->coalesce_mode |
10999 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11002 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11003 spin_unlock(&tp->lock);
11004 tg3_reset_task_schedule(tp);
11005 goto restart_timer;
11009 /* This part only runs once per second. */
11010 if (!--tp->timer_counter) {
11011 if (tg3_flag(tp, 5705_PLUS))
11012 tg3_periodic_fetch_stats(tp);
11014 if (tp->setlpicnt && !--tp->setlpicnt)
11015 tg3_phy_eee_enable(tp);
11017 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11021 mac_stat = tr32(MAC_STATUS);
11024 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11025 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11027 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11031 tg3_setup_phy(tp, false);
11032 } else if (tg3_flag(tp, POLL_SERDES)) {
11033 u32 mac_stat = tr32(MAC_STATUS);
11034 int need_setup = 0;
11037 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11040 if (!tp->link_up &&
11041 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11042 MAC_STATUS_SIGNAL_DET))) {
11046 if (!tp->serdes_counter) {
11049 ~MAC_MODE_PORT_MODE_MASK));
11051 tw32_f(MAC_MODE, tp->mac_mode);
11054 tg3_setup_phy(tp, false);
11056 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11057 tg3_flag(tp, 5780_CLASS)) {
11058 tg3_serdes_parallel_detect(tp);
11059 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11060 u32 cpmu = tr32(TG3_CPMU_STATUS);
11061 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11062 TG3_CPMU_STATUS_LINK_MASK);
11064 if (link_up != tp->link_up)
11065 tg3_setup_phy(tp, false);
11068 tp->timer_counter = tp->timer_multiplier;
11071 /* Heartbeat is only sent once every 2 seconds.
11073 * The heartbeat is to tell the ASF firmware that the host
11074 * driver is still alive. In the event that the OS crashes,
11075 * ASF needs to reset the hardware to free up the FIFO space
11076 * that may be filled with rx packets destined for the host.
11077 * If the FIFO is full, ASF will no longer function properly.
11079 * Unintended resets have been reported on real time kernels
11080 * where the timer doesn't run on time. Netpoll will also have
11083 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11084 * to check the ring condition when the heartbeat is expiring
11085 * before doing the reset. This will prevent most unintended
11088 if (!--tp->asf_counter) {
11089 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11090 tg3_wait_for_event_ack(tp);
11092 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11093 FWCMD_NICDRV_ALIVE3);
11094 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11095 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11096 TG3_FW_UPDATE_TIMEOUT_SEC);
11098 tg3_generate_fw_event(tp);
11100 tp->asf_counter = tp->asf_multiplier;
11103 /* Update the APE heartbeat every 5 seconds.*/
11104 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11106 spin_unlock(&tp->lock);
11109 tp->timer.expires = jiffies + tp->timer_offset;
11110 add_timer(&tp->timer);
11113 static void tg3_timer_init(struct tg3 *tp)
11115 if (tg3_flag(tp, TAGGED_STATUS) &&
11116 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11117 !tg3_flag(tp, 57765_CLASS))
11118 tp->timer_offset = HZ;
11120 tp->timer_offset = HZ / 10;
11122 BUG_ON(tp->timer_offset > HZ);
11124 tp->timer_multiplier = (HZ / tp->timer_offset);
11125 tp->asf_multiplier = (HZ / tp->timer_offset) *
11126 TG3_FW_UPDATE_FREQ_SEC;
11128 timer_setup(&tp->timer, tg3_timer, 0);
11131 static void tg3_timer_start(struct tg3 *tp)
11133 tp->asf_counter = tp->asf_multiplier;
11134 tp->timer_counter = tp->timer_multiplier;
11136 tp->timer.expires = jiffies + tp->timer_offset;
11137 add_timer(&tp->timer);
11140 static void tg3_timer_stop(struct tg3 *tp)
11142 del_timer_sync(&tp->timer);
11145 /* Restart hardware after configuration changes, self-test, etc.
11146 * Invoked with tp->lock held.
11148 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11149 __releases(tp->lock)
11150 __acquires(tp->lock)
11154 err = tg3_init_hw(tp, reset_phy);
11156 netdev_err(tp->dev,
11157 "Failed to re-initialize device, aborting\n");
11158 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11159 tg3_full_unlock(tp);
11160 tg3_timer_stop(tp);
11162 tg3_napi_enable(tp);
11163 dev_close(tp->dev);
11164 tg3_full_lock(tp, 0);
11169 static void tg3_reset_task(struct work_struct *work)
11171 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11175 tg3_full_lock(tp, 0);
11177 if (!netif_running(tp->dev)) {
11178 tg3_flag_clear(tp, RESET_TASK_PENDING);
11179 tg3_full_unlock(tp);
11184 tg3_full_unlock(tp);
11188 tg3_netif_stop(tp);
11190 tg3_full_lock(tp, 1);
11192 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11193 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11194 tp->write32_rx_mbox = tg3_write_flush_reg32;
11195 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11196 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11199 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11200 err = tg3_init_hw(tp, true);
11204 tg3_netif_start(tp);
11207 tg3_full_unlock(tp);
11212 tg3_flag_clear(tp, RESET_TASK_PENDING);
11216 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11219 unsigned long flags;
11221 struct tg3_napi *tnapi = &tp->napi[irq_num];
11223 if (tp->irq_cnt == 1)
11224 name = tp->dev->name;
11226 name = &tnapi->irq_lbl[0];
11227 if (tnapi->tx_buffers && tnapi->rx_rcb)
11228 snprintf(name, IFNAMSIZ,
11229 "%s-txrx-%d", tp->dev->name, irq_num);
11230 else if (tnapi->tx_buffers)
11231 snprintf(name, IFNAMSIZ,
11232 "%s-tx-%d", tp->dev->name, irq_num);
11233 else if (tnapi->rx_rcb)
11234 snprintf(name, IFNAMSIZ,
11235 "%s-rx-%d", tp->dev->name, irq_num);
11237 snprintf(name, IFNAMSIZ,
11238 "%s-%d", tp->dev->name, irq_num);
11239 name[IFNAMSIZ-1] = 0;
11242 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11244 if (tg3_flag(tp, 1SHOT_MSI))
11245 fn = tg3_msi_1shot;
11248 fn = tg3_interrupt;
11249 if (tg3_flag(tp, TAGGED_STATUS))
11250 fn = tg3_interrupt_tagged;
11251 flags = IRQF_SHARED;
11254 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11257 static int tg3_test_interrupt(struct tg3 *tp)
11259 struct tg3_napi *tnapi = &tp->napi[0];
11260 struct net_device *dev = tp->dev;
11261 int err, i, intr_ok = 0;
11264 if (!netif_running(dev))
11267 tg3_disable_ints(tp);
11269 free_irq(tnapi->irq_vec, tnapi);
11272 * Turn off MSI one shot mode. Otherwise this test has no
11273 * observable way to know whether the interrupt was delivered.
11275 if (tg3_flag(tp, 57765_PLUS)) {
11276 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11277 tw32(MSGINT_MODE, val);
11280 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11281 IRQF_SHARED, dev->name, tnapi);
11285 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11286 tg3_enable_ints(tp);
11288 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11291 for (i = 0; i < 5; i++) {
11292 u32 int_mbox, misc_host_ctrl;
11294 int_mbox = tr32_mailbox(tnapi->int_mbox);
11295 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11297 if ((int_mbox != 0) ||
11298 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11303 if (tg3_flag(tp, 57765_PLUS) &&
11304 tnapi->hw_status->status_tag != tnapi->last_tag)
11305 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11310 tg3_disable_ints(tp);
11312 free_irq(tnapi->irq_vec, tnapi);
11314 err = tg3_request_irq(tp, 0);
11320 /* Reenable MSI one shot mode. */
11321 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11322 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11323 tw32(MSGINT_MODE, val);
11331 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11332 * successfully restored
11334 static int tg3_test_msi(struct tg3 *tp)
11339 if (!tg3_flag(tp, USING_MSI))
11342 /* Turn off SERR reporting in case MSI terminates with Master
11345 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11346 pci_write_config_word(tp->pdev, PCI_COMMAND,
11347 pci_cmd & ~PCI_COMMAND_SERR);
11349 err = tg3_test_interrupt(tp);
11351 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11356 /* other failures */
11360 /* MSI test failed, go back to INTx mode */
11361 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11362 "to INTx mode. Please report this failure to the PCI "
11363 "maintainer and include system chipset information\n");
11365 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11367 pci_disable_msi(tp->pdev);
11369 tg3_flag_clear(tp, USING_MSI);
11370 tp->napi[0].irq_vec = tp->pdev->irq;
11372 err = tg3_request_irq(tp, 0);
11376 /* Need to reset the chip because the MSI cycle may have terminated
11377 * with Master Abort.
11379 tg3_full_lock(tp, 1);
11381 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11382 err = tg3_init_hw(tp, true);
11384 tg3_full_unlock(tp);
11387 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11392 static int tg3_request_firmware(struct tg3 *tp)
11394 const struct tg3_firmware_hdr *fw_hdr;
11396 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11397 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11402 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11404 /* Firmware blob starts with version numbers, followed by
11405 * start address and _full_ length including BSS sections
11406 * (which must be longer than the actual data, of course
11409 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11410 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11411 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11412 tp->fw_len, tp->fw_needed);
11413 release_firmware(tp->fw);
11418 /* We no longer need firmware; we have it. */
11419 tp->fw_needed = NULL;
11423 static u32 tg3_irq_count(struct tg3 *tp)
11425 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11428 /* We want as many rx rings enabled as there are cpus.
11429 * In multiqueue MSI-X mode, the first MSI-X vector
11430 * only deals with link interrupts, etc, so we add
11431 * one to the number of vectors we are requesting.
11433 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11439 static bool tg3_enable_msix(struct tg3 *tp)
11442 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11444 tp->txq_cnt = tp->txq_req;
11445 tp->rxq_cnt = tp->rxq_req;
11447 tp->rxq_cnt = netif_get_num_default_rss_queues();
11448 if (tp->rxq_cnt > tp->rxq_max)
11449 tp->rxq_cnt = tp->rxq_max;
11451 /* Disable multiple TX rings by default. Simple round-robin hardware
11452 * scheduling of the TX rings can cause starvation of rings with
11453 * small packets when other rings have TSO or jumbo packets.
11458 tp->irq_cnt = tg3_irq_count(tp);
11460 for (i = 0; i < tp->irq_max; i++) {
11461 msix_ent[i].entry = i;
11462 msix_ent[i].vector = 0;
11465 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11468 } else if (rc < tp->irq_cnt) {
11469 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11472 tp->rxq_cnt = max(rc - 1, 1);
11474 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11477 for (i = 0; i < tp->irq_max; i++)
11478 tp->napi[i].irq_vec = msix_ent[i].vector;
11480 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11481 pci_disable_msix(tp->pdev);
11485 if (tp->irq_cnt == 1)
11488 tg3_flag_set(tp, ENABLE_RSS);
11490 if (tp->txq_cnt > 1)
11491 tg3_flag_set(tp, ENABLE_TSS);
11493 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11498 static void tg3_ints_init(struct tg3 *tp)
11500 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11501 !tg3_flag(tp, TAGGED_STATUS)) {
11502 /* All MSI supporting chips should support tagged
11503 * status. Assert that this is the case.
11505 netdev_warn(tp->dev,
11506 "MSI without TAGGED_STATUS? Not using MSI\n");
11510 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11511 tg3_flag_set(tp, USING_MSIX);
11512 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11513 tg3_flag_set(tp, USING_MSI);
11515 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11516 u32 msi_mode = tr32(MSGINT_MODE);
11517 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11518 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11519 if (!tg3_flag(tp, 1SHOT_MSI))
11520 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11521 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11524 if (!tg3_flag(tp, USING_MSIX)) {
11526 tp->napi[0].irq_vec = tp->pdev->irq;
11529 if (tp->irq_cnt == 1) {
11532 netif_set_real_num_tx_queues(tp->dev, 1);
11533 netif_set_real_num_rx_queues(tp->dev, 1);
11537 static void tg3_ints_fini(struct tg3 *tp)
11539 if (tg3_flag(tp, USING_MSIX))
11540 pci_disable_msix(tp->pdev);
11541 else if (tg3_flag(tp, USING_MSI))
11542 pci_disable_msi(tp->pdev);
11543 tg3_flag_clear(tp, USING_MSI);
11544 tg3_flag_clear(tp, USING_MSIX);
11545 tg3_flag_clear(tp, ENABLE_RSS);
11546 tg3_flag_clear(tp, ENABLE_TSS);
11549 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11552 struct net_device *dev = tp->dev;
11556 * Setup interrupts first so we know how
11557 * many NAPI resources to allocate
11561 tg3_rss_check_indir_tbl(tp);
11563 /* The placement of this call is tied
11564 * to the setup and use of Host TX descriptors.
11566 err = tg3_alloc_consistent(tp);
11568 goto out_ints_fini;
11572 tg3_napi_enable(tp);
11574 for (i = 0; i < tp->irq_cnt; i++) {
11575 err = tg3_request_irq(tp, i);
11577 for (i--; i >= 0; i--) {
11578 struct tg3_napi *tnapi = &tp->napi[i];
11580 free_irq(tnapi->irq_vec, tnapi);
11582 goto out_napi_fini;
11586 tg3_full_lock(tp, 0);
11589 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11591 err = tg3_init_hw(tp, reset_phy);
11593 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11594 tg3_free_rings(tp);
11597 tg3_full_unlock(tp);
11602 if (test_irq && tg3_flag(tp, USING_MSI)) {
11603 err = tg3_test_msi(tp);
11606 tg3_full_lock(tp, 0);
11607 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11608 tg3_free_rings(tp);
11609 tg3_full_unlock(tp);
11611 goto out_napi_fini;
11614 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11615 u32 val = tr32(PCIE_TRANSACTION_CFG);
11617 tw32(PCIE_TRANSACTION_CFG,
11618 val | PCIE_TRANS_CFG_1SHOT_MSI);
11624 tg3_hwmon_open(tp);
11626 tg3_full_lock(tp, 0);
11628 tg3_timer_start(tp);
11629 tg3_flag_set(tp, INIT_COMPLETE);
11630 tg3_enable_ints(tp);
11632 tg3_ptp_resume(tp);
11634 tg3_full_unlock(tp);
11636 netif_tx_start_all_queues(dev);
11639 * Reset loopback feature if it was turned on while the device was down
11640 * make sure that it's installed properly now.
11642 if (dev->features & NETIF_F_LOOPBACK)
11643 tg3_set_loopback(dev, dev->features);
11648 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11649 struct tg3_napi *tnapi = &tp->napi[i];
11650 free_irq(tnapi->irq_vec, tnapi);
11654 tg3_napi_disable(tp);
11656 tg3_free_consistent(tp);
11664 static void tg3_stop(struct tg3 *tp)
11668 tg3_reset_task_cancel(tp);
11669 tg3_netif_stop(tp);
11671 tg3_timer_stop(tp);
11673 tg3_hwmon_close(tp);
11677 tg3_full_lock(tp, 1);
11679 tg3_disable_ints(tp);
11681 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11682 tg3_free_rings(tp);
11683 tg3_flag_clear(tp, INIT_COMPLETE);
11685 tg3_full_unlock(tp);
11687 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11688 struct tg3_napi *tnapi = &tp->napi[i];
11689 free_irq(tnapi->irq_vec, tnapi);
11696 tg3_free_consistent(tp);
11699 static int tg3_open(struct net_device *dev)
11701 struct tg3 *tp = netdev_priv(dev);
11704 if (tp->pcierr_recovery) {
11705 netdev_err(dev, "Failed to open device. PCI error recovery "
11710 if (tp->fw_needed) {
11711 err = tg3_request_firmware(tp);
11712 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11714 netdev_warn(tp->dev, "EEE capability disabled\n");
11715 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11716 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11717 netdev_warn(tp->dev, "EEE capability restored\n");
11718 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11720 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11724 netdev_warn(tp->dev, "TSO capability disabled\n");
11725 tg3_flag_clear(tp, TSO_CAPABLE);
11726 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11727 netdev_notice(tp->dev, "TSO capability restored\n");
11728 tg3_flag_set(tp, TSO_CAPABLE);
11732 tg3_carrier_off(tp);
11734 err = tg3_power_up(tp);
11738 tg3_full_lock(tp, 0);
11740 tg3_disable_ints(tp);
11741 tg3_flag_clear(tp, INIT_COMPLETE);
11743 tg3_full_unlock(tp);
11745 err = tg3_start(tp,
11746 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11749 tg3_frob_aux_power(tp, false);
11750 pci_set_power_state(tp->pdev, PCI_D3hot);
11756 static int tg3_close(struct net_device *dev)
11758 struct tg3 *tp = netdev_priv(dev);
11760 if (tp->pcierr_recovery) {
11761 netdev_err(dev, "Failed to close device. PCI error recovery "
11768 if (pci_device_is_present(tp->pdev)) {
11769 tg3_power_down_prepare(tp);
11771 tg3_carrier_off(tp);
11776 static inline u64 get_stat64(tg3_stat64_t *val)
11778 return ((u64)val->high << 32) | ((u64)val->low);
11781 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11783 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11785 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11786 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11787 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11790 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11791 tg3_writephy(tp, MII_TG3_TEST1,
11792 val | MII_TG3_TEST1_CRC_EN);
11793 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11797 tp->phy_crc_errors += val;
11799 return tp->phy_crc_errors;
11802 return get_stat64(&hw_stats->rx_fcs_errors);
11805 #define ESTAT_ADD(member) \
11806 estats->member = old_estats->member + \
11807 get_stat64(&hw_stats->member)
11809 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11811 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11812 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11814 ESTAT_ADD(rx_octets);
11815 ESTAT_ADD(rx_fragments);
11816 ESTAT_ADD(rx_ucast_packets);
11817 ESTAT_ADD(rx_mcast_packets);
11818 ESTAT_ADD(rx_bcast_packets);
11819 ESTAT_ADD(rx_fcs_errors);
11820 ESTAT_ADD(rx_align_errors);
11821 ESTAT_ADD(rx_xon_pause_rcvd);
11822 ESTAT_ADD(rx_xoff_pause_rcvd);
11823 ESTAT_ADD(rx_mac_ctrl_rcvd);
11824 ESTAT_ADD(rx_xoff_entered);
11825 ESTAT_ADD(rx_frame_too_long_errors);
11826 ESTAT_ADD(rx_jabbers);
11827 ESTAT_ADD(rx_undersize_packets);
11828 ESTAT_ADD(rx_in_length_errors);
11829 ESTAT_ADD(rx_out_length_errors);
11830 ESTAT_ADD(rx_64_or_less_octet_packets);
11831 ESTAT_ADD(rx_65_to_127_octet_packets);
11832 ESTAT_ADD(rx_128_to_255_octet_packets);
11833 ESTAT_ADD(rx_256_to_511_octet_packets);
11834 ESTAT_ADD(rx_512_to_1023_octet_packets);
11835 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11836 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11837 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11838 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11839 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11841 ESTAT_ADD(tx_octets);
11842 ESTAT_ADD(tx_collisions);
11843 ESTAT_ADD(tx_xon_sent);
11844 ESTAT_ADD(tx_xoff_sent);
11845 ESTAT_ADD(tx_flow_control);
11846 ESTAT_ADD(tx_mac_errors);
11847 ESTAT_ADD(tx_single_collisions);
11848 ESTAT_ADD(tx_mult_collisions);
11849 ESTAT_ADD(tx_deferred);
11850 ESTAT_ADD(tx_excessive_collisions);
11851 ESTAT_ADD(tx_late_collisions);
11852 ESTAT_ADD(tx_collide_2times);
11853 ESTAT_ADD(tx_collide_3times);
11854 ESTAT_ADD(tx_collide_4times);
11855 ESTAT_ADD(tx_collide_5times);
11856 ESTAT_ADD(tx_collide_6times);
11857 ESTAT_ADD(tx_collide_7times);
11858 ESTAT_ADD(tx_collide_8times);
11859 ESTAT_ADD(tx_collide_9times);
11860 ESTAT_ADD(tx_collide_10times);
11861 ESTAT_ADD(tx_collide_11times);
11862 ESTAT_ADD(tx_collide_12times);
11863 ESTAT_ADD(tx_collide_13times);
11864 ESTAT_ADD(tx_collide_14times);
11865 ESTAT_ADD(tx_collide_15times);
11866 ESTAT_ADD(tx_ucast_packets);
11867 ESTAT_ADD(tx_mcast_packets);
11868 ESTAT_ADD(tx_bcast_packets);
11869 ESTAT_ADD(tx_carrier_sense_errors);
11870 ESTAT_ADD(tx_discards);
11871 ESTAT_ADD(tx_errors);
11873 ESTAT_ADD(dma_writeq_full);
11874 ESTAT_ADD(dma_write_prioq_full);
11875 ESTAT_ADD(rxbds_empty);
11876 ESTAT_ADD(rx_discards);
11877 ESTAT_ADD(rx_errors);
11878 ESTAT_ADD(rx_threshold_hit);
11880 ESTAT_ADD(dma_readq_full);
11881 ESTAT_ADD(dma_read_prioq_full);
11882 ESTAT_ADD(tx_comp_queue_full);
11884 ESTAT_ADD(ring_set_send_prod_index);
11885 ESTAT_ADD(ring_status_update);
11886 ESTAT_ADD(nic_irqs);
11887 ESTAT_ADD(nic_avoided_irqs);
11888 ESTAT_ADD(nic_tx_threshold_hit);
11890 ESTAT_ADD(mbuf_lwm_thresh_hit);
11893 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11895 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11896 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11898 stats->rx_packets = old_stats->rx_packets +
11899 get_stat64(&hw_stats->rx_ucast_packets) +
11900 get_stat64(&hw_stats->rx_mcast_packets) +
11901 get_stat64(&hw_stats->rx_bcast_packets);
11903 stats->tx_packets = old_stats->tx_packets +
11904 get_stat64(&hw_stats->tx_ucast_packets) +
11905 get_stat64(&hw_stats->tx_mcast_packets) +
11906 get_stat64(&hw_stats->tx_bcast_packets);
11908 stats->rx_bytes = old_stats->rx_bytes +
11909 get_stat64(&hw_stats->rx_octets);
11910 stats->tx_bytes = old_stats->tx_bytes +
11911 get_stat64(&hw_stats->tx_octets);
11913 stats->rx_errors = old_stats->rx_errors +
11914 get_stat64(&hw_stats->rx_errors);
11915 stats->tx_errors = old_stats->tx_errors +
11916 get_stat64(&hw_stats->tx_errors) +
11917 get_stat64(&hw_stats->tx_mac_errors) +
11918 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11919 get_stat64(&hw_stats->tx_discards);
11921 stats->multicast = old_stats->multicast +
11922 get_stat64(&hw_stats->rx_mcast_packets);
11923 stats->collisions = old_stats->collisions +
11924 get_stat64(&hw_stats->tx_collisions);
11926 stats->rx_length_errors = old_stats->rx_length_errors +
11927 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11928 get_stat64(&hw_stats->rx_undersize_packets);
11930 stats->rx_frame_errors = old_stats->rx_frame_errors +
11931 get_stat64(&hw_stats->rx_align_errors);
11932 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11933 get_stat64(&hw_stats->tx_discards);
11934 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11935 get_stat64(&hw_stats->tx_carrier_sense_errors);
11937 stats->rx_crc_errors = old_stats->rx_crc_errors +
11938 tg3_calc_crc_errors(tp);
11940 stats->rx_missed_errors = old_stats->rx_missed_errors +
11941 get_stat64(&hw_stats->rx_discards);
11943 stats->rx_dropped = tp->rx_dropped;
11944 stats->tx_dropped = tp->tx_dropped;
11947 static int tg3_get_regs_len(struct net_device *dev)
11949 return TG3_REG_BLK_SIZE;
11952 static void tg3_get_regs(struct net_device *dev,
11953 struct ethtool_regs *regs, void *_p)
11955 struct tg3 *tp = netdev_priv(dev);
11959 memset(_p, 0, TG3_REG_BLK_SIZE);
11961 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11964 tg3_full_lock(tp, 0);
11966 tg3_dump_legacy_regs(tp, (u32 *)_p);
11968 tg3_full_unlock(tp);
11971 static int tg3_get_eeprom_len(struct net_device *dev)
11973 struct tg3 *tp = netdev_priv(dev);
11975 return tp->nvram_size;
11978 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11980 struct tg3 *tp = netdev_priv(dev);
11981 int ret, cpmu_restore = 0;
11983 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11986 if (tg3_flag(tp, NO_NVRAM))
11989 offset = eeprom->offset;
11993 eeprom->magic = TG3_EEPROM_MAGIC;
11995 /* Override clock, link aware and link idle modes */
11996 if (tg3_flag(tp, CPMU_PRESENT)) {
11997 cpmu_val = tr32(TG3_CPMU_CTRL);
11998 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11999 CPMU_CTRL_LINK_IDLE_MODE)) {
12000 tw32(TG3_CPMU_CTRL, cpmu_val &
12001 ~(CPMU_CTRL_LINK_AWARE_MODE |
12002 CPMU_CTRL_LINK_IDLE_MODE));
12006 tg3_override_clk(tp);
12009 /* adjustments to start on required 4 byte boundary */
12010 b_offset = offset & 3;
12011 b_count = 4 - b_offset;
12012 if (b_count > len) {
12013 /* i.e. offset=1 len=2 */
12016 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12019 memcpy(data, ((char *)&val) + b_offset, b_count);
12022 eeprom->len += b_count;
12025 /* read bytes up to the last 4 byte boundary */
12026 pd = &data[eeprom->len];
12027 for (i = 0; i < (len - (len & 3)); i += 4) {
12028 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12035 memcpy(pd + i, &val, 4);
12036 if (need_resched()) {
12037 if (signal_pending(current)) {
12048 /* read last bytes not ending on 4 byte boundary */
12049 pd = &data[eeprom->len];
12051 b_offset = offset + len - b_count;
12052 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12055 memcpy(pd, &val, b_count);
12056 eeprom->len += b_count;
12061 /* Restore clock, link aware and link idle modes */
12062 tg3_restore_clk(tp);
12064 tw32(TG3_CPMU_CTRL, cpmu_val);
12069 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12071 struct tg3 *tp = netdev_priv(dev);
12073 u32 offset, len, b_offset, odd_len;
12075 __be32 start = 0, end;
12077 if (tg3_flag(tp, NO_NVRAM) ||
12078 eeprom->magic != TG3_EEPROM_MAGIC)
12081 offset = eeprom->offset;
12084 if ((b_offset = (offset & 3))) {
12085 /* adjustments to start on required 4 byte boundary */
12086 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12097 /* adjustments to end on required 4 byte boundary */
12099 len = (len + 3) & ~3;
12100 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12106 if (b_offset || odd_len) {
12107 buf = kmalloc(len, GFP_KERNEL);
12111 memcpy(buf, &start, 4);
12113 memcpy(buf+len-4, &end, 4);
12114 memcpy(buf + b_offset, data, eeprom->len);
12117 ret = tg3_nvram_write_block(tp, offset, len, buf);
12125 static int tg3_get_link_ksettings(struct net_device *dev,
12126 struct ethtool_link_ksettings *cmd)
12128 struct tg3 *tp = netdev_priv(dev);
12129 u32 supported, advertising;
12131 if (tg3_flag(tp, USE_PHYLIB)) {
12132 struct phy_device *phydev;
12133 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12135 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12136 phy_ethtool_ksettings_get(phydev, cmd);
12141 supported = (SUPPORTED_Autoneg);
12143 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12144 supported |= (SUPPORTED_1000baseT_Half |
12145 SUPPORTED_1000baseT_Full);
12147 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12148 supported |= (SUPPORTED_100baseT_Half |
12149 SUPPORTED_100baseT_Full |
12150 SUPPORTED_10baseT_Half |
12151 SUPPORTED_10baseT_Full |
12153 cmd->base.port = PORT_TP;
12155 supported |= SUPPORTED_FIBRE;
12156 cmd->base.port = PORT_FIBRE;
12158 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12161 advertising = tp->link_config.advertising;
12162 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12163 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12164 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12165 advertising |= ADVERTISED_Pause;
12167 advertising |= ADVERTISED_Pause |
12168 ADVERTISED_Asym_Pause;
12170 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12171 advertising |= ADVERTISED_Asym_Pause;
12174 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12177 if (netif_running(dev) && tp->link_up) {
12178 cmd->base.speed = tp->link_config.active_speed;
12179 cmd->base.duplex = tp->link_config.active_duplex;
12180 ethtool_convert_legacy_u32_to_link_mode(
12181 cmd->link_modes.lp_advertising,
12182 tp->link_config.rmt_adv);
12184 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12185 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12186 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12188 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12191 cmd->base.speed = SPEED_UNKNOWN;
12192 cmd->base.duplex = DUPLEX_UNKNOWN;
12193 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12195 cmd->base.phy_address = tp->phy_addr;
12196 cmd->base.autoneg = tp->link_config.autoneg;
12200 static int tg3_set_link_ksettings(struct net_device *dev,
12201 const struct ethtool_link_ksettings *cmd)
12203 struct tg3 *tp = netdev_priv(dev);
12204 u32 speed = cmd->base.speed;
12207 if (tg3_flag(tp, USE_PHYLIB)) {
12208 struct phy_device *phydev;
12209 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12211 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12212 return phy_ethtool_ksettings_set(phydev, cmd);
12215 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12216 cmd->base.autoneg != AUTONEG_DISABLE)
12219 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12220 cmd->base.duplex != DUPLEX_FULL &&
12221 cmd->base.duplex != DUPLEX_HALF)
12224 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12225 cmd->link_modes.advertising);
12227 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12228 u32 mask = ADVERTISED_Autoneg |
12230 ADVERTISED_Asym_Pause;
12232 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12233 mask |= ADVERTISED_1000baseT_Half |
12234 ADVERTISED_1000baseT_Full;
12236 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12237 mask |= ADVERTISED_100baseT_Half |
12238 ADVERTISED_100baseT_Full |
12239 ADVERTISED_10baseT_Half |
12240 ADVERTISED_10baseT_Full |
12243 mask |= ADVERTISED_FIBRE;
12245 if (advertising & ~mask)
12248 mask &= (ADVERTISED_1000baseT_Half |
12249 ADVERTISED_1000baseT_Full |
12250 ADVERTISED_100baseT_Half |
12251 ADVERTISED_100baseT_Full |
12252 ADVERTISED_10baseT_Half |
12253 ADVERTISED_10baseT_Full);
12255 advertising &= mask;
12257 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12258 if (speed != SPEED_1000)
12261 if (cmd->base.duplex != DUPLEX_FULL)
12264 if (speed != SPEED_100 &&
12270 tg3_full_lock(tp, 0);
12272 tp->link_config.autoneg = cmd->base.autoneg;
12273 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12274 tp->link_config.advertising = (advertising |
12275 ADVERTISED_Autoneg);
12276 tp->link_config.speed = SPEED_UNKNOWN;
12277 tp->link_config.duplex = DUPLEX_UNKNOWN;
12279 tp->link_config.advertising = 0;
12280 tp->link_config.speed = speed;
12281 tp->link_config.duplex = cmd->base.duplex;
12284 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12286 tg3_warn_mgmt_link_flap(tp);
12288 if (netif_running(dev))
12289 tg3_setup_phy(tp, true);
12291 tg3_full_unlock(tp);
12296 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12298 struct tg3 *tp = netdev_priv(dev);
12300 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12301 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12302 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12303 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12306 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12308 struct tg3 *tp = netdev_priv(dev);
12310 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12311 wol->supported = WAKE_MAGIC;
12313 wol->supported = 0;
12315 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12316 wol->wolopts = WAKE_MAGIC;
12317 memset(&wol->sopass, 0, sizeof(wol->sopass));
12320 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12322 struct tg3 *tp = netdev_priv(dev);
12323 struct device *dp = &tp->pdev->dev;
12325 if (wol->wolopts & ~WAKE_MAGIC)
12327 if ((wol->wolopts & WAKE_MAGIC) &&
12328 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12331 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12333 if (device_may_wakeup(dp))
12334 tg3_flag_set(tp, WOL_ENABLE);
12336 tg3_flag_clear(tp, WOL_ENABLE);
12341 static u32 tg3_get_msglevel(struct net_device *dev)
12343 struct tg3 *tp = netdev_priv(dev);
12344 return tp->msg_enable;
12347 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12349 struct tg3 *tp = netdev_priv(dev);
12350 tp->msg_enable = value;
12353 static int tg3_nway_reset(struct net_device *dev)
12355 struct tg3 *tp = netdev_priv(dev);
12358 if (!netif_running(dev))
12361 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12364 tg3_warn_mgmt_link_flap(tp);
12366 if (tg3_flag(tp, USE_PHYLIB)) {
12367 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12369 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12373 spin_lock_bh(&tp->lock);
12375 tg3_readphy(tp, MII_BMCR, &bmcr);
12376 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12377 ((bmcr & BMCR_ANENABLE) ||
12378 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12379 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12383 spin_unlock_bh(&tp->lock);
12389 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12391 struct tg3 *tp = netdev_priv(dev);
12393 ering->rx_max_pending = tp->rx_std_ring_mask;
12394 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12395 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12397 ering->rx_jumbo_max_pending = 0;
12399 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12401 ering->rx_pending = tp->rx_pending;
12402 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12403 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12405 ering->rx_jumbo_pending = 0;
12407 ering->tx_pending = tp->napi[0].tx_pending;
12410 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12412 struct tg3 *tp = netdev_priv(dev);
12413 int i, irq_sync = 0, err = 0;
12415 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12416 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12417 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12418 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12419 (tg3_flag(tp, TSO_BUG) &&
12420 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12423 if (netif_running(dev)) {
12425 tg3_netif_stop(tp);
12429 tg3_full_lock(tp, irq_sync);
12431 tp->rx_pending = ering->rx_pending;
12433 if (tg3_flag(tp, MAX_RXPEND_64) &&
12434 tp->rx_pending > 63)
12435 tp->rx_pending = 63;
12437 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12438 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12440 for (i = 0; i < tp->irq_max; i++)
12441 tp->napi[i].tx_pending = ering->tx_pending;
12443 if (netif_running(dev)) {
12444 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12445 err = tg3_restart_hw(tp, false);
12447 tg3_netif_start(tp);
12450 tg3_full_unlock(tp);
12452 if (irq_sync && !err)
12458 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12460 struct tg3 *tp = netdev_priv(dev);
12462 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12464 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12465 epause->rx_pause = 1;
12467 epause->rx_pause = 0;
12469 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12470 epause->tx_pause = 1;
12472 epause->tx_pause = 0;
12475 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12477 struct tg3 *tp = netdev_priv(dev);
12480 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12481 tg3_warn_mgmt_link_flap(tp);
12483 if (tg3_flag(tp, USE_PHYLIB)) {
12485 struct phy_device *phydev;
12487 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12489 if (!(phydev->supported & SUPPORTED_Pause) ||
12490 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12491 (epause->rx_pause != epause->tx_pause)))
12494 tp->link_config.flowctrl = 0;
12495 if (epause->rx_pause) {
12496 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12498 if (epause->tx_pause) {
12499 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12500 newadv = ADVERTISED_Pause;
12502 newadv = ADVERTISED_Pause |
12503 ADVERTISED_Asym_Pause;
12504 } else if (epause->tx_pause) {
12505 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12506 newadv = ADVERTISED_Asym_Pause;
12510 if (epause->autoneg)
12511 tg3_flag_set(tp, PAUSE_AUTONEG);
12513 tg3_flag_clear(tp, PAUSE_AUTONEG);
12515 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12516 u32 oldadv = phydev->advertising &
12517 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12518 if (oldadv != newadv) {
12519 phydev->advertising &=
12520 ~(ADVERTISED_Pause |
12521 ADVERTISED_Asym_Pause);
12522 phydev->advertising |= newadv;
12523 if (phydev->autoneg) {
12525 * Always renegotiate the link to
12526 * inform our link partner of our
12527 * flow control settings, even if the
12528 * flow control is forced. Let
12529 * tg3_adjust_link() do the final
12530 * flow control setup.
12532 return phy_start_aneg(phydev);
12536 if (!epause->autoneg)
12537 tg3_setup_flow_control(tp, 0, 0);
12539 tp->link_config.advertising &=
12540 ~(ADVERTISED_Pause |
12541 ADVERTISED_Asym_Pause);
12542 tp->link_config.advertising |= newadv;
12547 if (netif_running(dev)) {
12548 tg3_netif_stop(tp);
12552 tg3_full_lock(tp, irq_sync);
12554 if (epause->autoneg)
12555 tg3_flag_set(tp, PAUSE_AUTONEG);
12557 tg3_flag_clear(tp, PAUSE_AUTONEG);
12558 if (epause->rx_pause)
12559 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12561 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12562 if (epause->tx_pause)
12563 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12565 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12567 if (netif_running(dev)) {
12568 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12569 err = tg3_restart_hw(tp, false);
12571 tg3_netif_start(tp);
12574 tg3_full_unlock(tp);
12577 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12582 static int tg3_get_sset_count(struct net_device *dev, int sset)
12586 return TG3_NUM_TEST;
12588 return TG3_NUM_STATS;
12590 return -EOPNOTSUPP;
12594 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12595 u32 *rules __always_unused)
12597 struct tg3 *tp = netdev_priv(dev);
12599 if (!tg3_flag(tp, SUPPORT_MSIX))
12600 return -EOPNOTSUPP;
12602 switch (info->cmd) {
12603 case ETHTOOL_GRXRINGS:
12604 if (netif_running(tp->dev))
12605 info->data = tp->rxq_cnt;
12607 info->data = num_online_cpus();
12608 if (info->data > TG3_RSS_MAX_NUM_QS)
12609 info->data = TG3_RSS_MAX_NUM_QS;
12615 return -EOPNOTSUPP;
12619 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12622 struct tg3 *tp = netdev_priv(dev);
12624 if (tg3_flag(tp, SUPPORT_MSIX))
12625 size = TG3_RSS_INDIR_TBL_SIZE;
12630 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12632 struct tg3 *tp = netdev_priv(dev);
12636 *hfunc = ETH_RSS_HASH_TOP;
12640 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12641 indir[i] = tp->rss_ind_tbl[i];
12646 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12649 struct tg3 *tp = netdev_priv(dev);
12652 /* We require at least one supported parameter to be changed and no
12653 * change in any of the unsupported parameters
12656 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12657 return -EOPNOTSUPP;
12662 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12663 tp->rss_ind_tbl[i] = indir[i];
12665 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12668 /* It is legal to write the indirection
12669 * table while the device is running.
12671 tg3_full_lock(tp, 0);
12672 tg3_rss_write_indir_tbl(tp);
12673 tg3_full_unlock(tp);
12678 static void tg3_get_channels(struct net_device *dev,
12679 struct ethtool_channels *channel)
12681 struct tg3 *tp = netdev_priv(dev);
12682 u32 deflt_qs = netif_get_num_default_rss_queues();
12684 channel->max_rx = tp->rxq_max;
12685 channel->max_tx = tp->txq_max;
12687 if (netif_running(dev)) {
12688 channel->rx_count = tp->rxq_cnt;
12689 channel->tx_count = tp->txq_cnt;
12692 channel->rx_count = tp->rxq_req;
12694 channel->rx_count = min(deflt_qs, tp->rxq_max);
12697 channel->tx_count = tp->txq_req;
12699 channel->tx_count = min(deflt_qs, tp->txq_max);
12703 static int tg3_set_channels(struct net_device *dev,
12704 struct ethtool_channels *channel)
12706 struct tg3 *tp = netdev_priv(dev);
12708 if (!tg3_flag(tp, SUPPORT_MSIX))
12709 return -EOPNOTSUPP;
12711 if (channel->rx_count > tp->rxq_max ||
12712 channel->tx_count > tp->txq_max)
12715 tp->rxq_req = channel->rx_count;
12716 tp->txq_req = channel->tx_count;
12718 if (!netif_running(dev))
12723 tg3_carrier_off(tp);
12725 tg3_start(tp, true, false, false);
12730 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12732 switch (stringset) {
12734 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
12737 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
12740 WARN_ON(1); /* we need a WARN() */
12745 static int tg3_set_phys_id(struct net_device *dev,
12746 enum ethtool_phys_id_state state)
12748 struct tg3 *tp = netdev_priv(dev);
12750 if (!netif_running(tp->dev))
12754 case ETHTOOL_ID_ACTIVE:
12755 return 1; /* cycle on/off once per second */
12757 case ETHTOOL_ID_ON:
12758 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12759 LED_CTRL_1000MBPS_ON |
12760 LED_CTRL_100MBPS_ON |
12761 LED_CTRL_10MBPS_ON |
12762 LED_CTRL_TRAFFIC_OVERRIDE |
12763 LED_CTRL_TRAFFIC_BLINK |
12764 LED_CTRL_TRAFFIC_LED);
12767 case ETHTOOL_ID_OFF:
12768 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12769 LED_CTRL_TRAFFIC_OVERRIDE);
12772 case ETHTOOL_ID_INACTIVE:
12773 tw32(MAC_LED_CTRL, tp->led_ctrl);
12780 static void tg3_get_ethtool_stats(struct net_device *dev,
12781 struct ethtool_stats *estats, u64 *tmp_stats)
12783 struct tg3 *tp = netdev_priv(dev);
12786 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12788 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12791 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12795 u32 offset = 0, len = 0;
12798 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12801 if (magic == TG3_EEPROM_MAGIC) {
12802 for (offset = TG3_NVM_DIR_START;
12803 offset < TG3_NVM_DIR_END;
12804 offset += TG3_NVM_DIRENT_SIZE) {
12805 if (tg3_nvram_read(tp, offset, &val))
12808 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12809 TG3_NVM_DIRTYPE_EXTVPD)
12813 if (offset != TG3_NVM_DIR_END) {
12814 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12815 if (tg3_nvram_read(tp, offset + 4, &offset))
12818 offset = tg3_nvram_logical_addr(tp, offset);
12822 if (!offset || !len) {
12823 offset = TG3_NVM_VPD_OFF;
12824 len = TG3_NVM_VPD_LEN;
12827 buf = kmalloc(len, GFP_KERNEL);
12831 if (magic == TG3_EEPROM_MAGIC) {
12832 for (i = 0; i < len; i += 4) {
12833 /* The data is in little-endian format in NVRAM.
12834 * Use the big-endian read routines to preserve
12835 * the byte order as it exists in NVRAM.
12837 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12843 unsigned int pos = 0;
12845 ptr = (u8 *)&buf[0];
12846 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12847 cnt = pci_read_vpd(tp->pdev, pos,
12849 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12867 #define NVRAM_TEST_SIZE 0x100
12868 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12869 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12870 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12871 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12872 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12873 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12874 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12875 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12877 static int tg3_test_nvram(struct tg3 *tp)
12879 u32 csum, magic, len;
12881 int i, j, k, err = 0, size;
12883 if (tg3_flag(tp, NO_NVRAM))
12886 if (tg3_nvram_read(tp, 0, &magic) != 0)
12889 if (magic == TG3_EEPROM_MAGIC)
12890 size = NVRAM_TEST_SIZE;
12891 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12892 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12893 TG3_EEPROM_SB_FORMAT_1) {
12894 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12895 case TG3_EEPROM_SB_REVISION_0:
12896 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12898 case TG3_EEPROM_SB_REVISION_2:
12899 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12901 case TG3_EEPROM_SB_REVISION_3:
12902 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12904 case TG3_EEPROM_SB_REVISION_4:
12905 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12907 case TG3_EEPROM_SB_REVISION_5:
12908 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12910 case TG3_EEPROM_SB_REVISION_6:
12911 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12918 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12919 size = NVRAM_SELFBOOT_HW_SIZE;
12923 buf = kmalloc(size, GFP_KERNEL);
12928 for (i = 0, j = 0; i < size; i += 4, j++) {
12929 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12936 /* Selfboot format */
12937 magic = be32_to_cpu(buf[0]);
12938 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12939 TG3_EEPROM_MAGIC_FW) {
12940 u8 *buf8 = (u8 *) buf, csum8 = 0;
12942 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12943 TG3_EEPROM_SB_REVISION_2) {
12944 /* For rev 2, the csum doesn't include the MBA. */
12945 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12947 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12950 for (i = 0; i < size; i++)
12963 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12964 TG3_EEPROM_MAGIC_HW) {
12965 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12966 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12967 u8 *buf8 = (u8 *) buf;
12969 /* Separate the parity bits and the data bytes. */
12970 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12971 if ((i == 0) || (i == 8)) {
12975 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12976 parity[k++] = buf8[i] & msk;
12978 } else if (i == 16) {
12982 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12983 parity[k++] = buf8[i] & msk;
12986 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12987 parity[k++] = buf8[i] & msk;
12990 data[j++] = buf8[i];
12994 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12995 u8 hw8 = hweight8(data[i]);
12997 if ((hw8 & 0x1) && parity[i])
12999 else if (!(hw8 & 0x1) && !parity[i])
13008 /* Bootstrap checksum at offset 0x10 */
13009 csum = calc_crc((unsigned char *) buf, 0x10);
13010 if (csum != le32_to_cpu(buf[0x10/4]))
13013 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13014 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13015 if (csum != le32_to_cpu(buf[0xfc/4]))
13020 buf = tg3_vpd_readblock(tp, &len);
13024 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13026 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13030 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13033 i += PCI_VPD_LRDT_TAG_SIZE;
13034 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13035 PCI_VPD_RO_KEYWORD_CHKSUM);
13039 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13041 for (i = 0; i <= j; i++)
13042 csum8 += ((u8 *)buf)[i];
13056 #define TG3_SERDES_TIMEOUT_SEC 2
13057 #define TG3_COPPER_TIMEOUT_SEC 6
13059 static int tg3_test_link(struct tg3 *tp)
13063 if (!netif_running(tp->dev))
13066 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13067 max = TG3_SERDES_TIMEOUT_SEC;
13069 max = TG3_COPPER_TIMEOUT_SEC;
13071 for (i = 0; i < max; i++) {
13075 if (msleep_interruptible(1000))
13082 /* Only test the commonly used registers */
13083 static int tg3_test_registers(struct tg3 *tp)
13085 int i, is_5705, is_5750;
13086 u32 offset, read_mask, write_mask, val, save_val, read_val;
13090 #define TG3_FL_5705 0x1
13091 #define TG3_FL_NOT_5705 0x2
13092 #define TG3_FL_NOT_5788 0x4
13093 #define TG3_FL_NOT_5750 0x8
13097 /* MAC Control Registers */
13098 { MAC_MODE, TG3_FL_NOT_5705,
13099 0x00000000, 0x00ef6f8c },
13100 { MAC_MODE, TG3_FL_5705,
13101 0x00000000, 0x01ef6b8c },
13102 { MAC_STATUS, TG3_FL_NOT_5705,
13103 0x03800107, 0x00000000 },
13104 { MAC_STATUS, TG3_FL_5705,
13105 0x03800100, 0x00000000 },
13106 { MAC_ADDR_0_HIGH, 0x0000,
13107 0x00000000, 0x0000ffff },
13108 { MAC_ADDR_0_LOW, 0x0000,
13109 0x00000000, 0xffffffff },
13110 { MAC_RX_MTU_SIZE, 0x0000,
13111 0x00000000, 0x0000ffff },
13112 { MAC_TX_MODE, 0x0000,
13113 0x00000000, 0x00000070 },
13114 { MAC_TX_LENGTHS, 0x0000,
13115 0x00000000, 0x00003fff },
13116 { MAC_RX_MODE, TG3_FL_NOT_5705,
13117 0x00000000, 0x000007fc },
13118 { MAC_RX_MODE, TG3_FL_5705,
13119 0x00000000, 0x000007dc },
13120 { MAC_HASH_REG_0, 0x0000,
13121 0x00000000, 0xffffffff },
13122 { MAC_HASH_REG_1, 0x0000,
13123 0x00000000, 0xffffffff },
13124 { MAC_HASH_REG_2, 0x0000,
13125 0x00000000, 0xffffffff },
13126 { MAC_HASH_REG_3, 0x0000,
13127 0x00000000, 0xffffffff },
13129 /* Receive Data and Receive BD Initiator Control Registers. */
13130 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13131 0x00000000, 0xffffffff },
13132 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13133 0x00000000, 0xffffffff },
13134 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13135 0x00000000, 0x00000003 },
13136 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13137 0x00000000, 0xffffffff },
13138 { RCVDBDI_STD_BD+0, 0x0000,
13139 0x00000000, 0xffffffff },
13140 { RCVDBDI_STD_BD+4, 0x0000,
13141 0x00000000, 0xffffffff },
13142 { RCVDBDI_STD_BD+8, 0x0000,
13143 0x00000000, 0xffff0002 },
13144 { RCVDBDI_STD_BD+0xc, 0x0000,
13145 0x00000000, 0xffffffff },
13147 /* Receive BD Initiator Control Registers. */
13148 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13149 0x00000000, 0xffffffff },
13150 { RCVBDI_STD_THRESH, TG3_FL_5705,
13151 0x00000000, 0x000003ff },
13152 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13153 0x00000000, 0xffffffff },
13155 /* Host Coalescing Control Registers. */
13156 { HOSTCC_MODE, TG3_FL_NOT_5705,
13157 0x00000000, 0x00000004 },
13158 { HOSTCC_MODE, TG3_FL_5705,
13159 0x00000000, 0x000000f6 },
13160 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13161 0x00000000, 0xffffffff },
13162 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13163 0x00000000, 0x000003ff },
13164 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13165 0x00000000, 0xffffffff },
13166 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13167 0x00000000, 0x000003ff },
13168 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13169 0x00000000, 0xffffffff },
13170 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13171 0x00000000, 0x000000ff },
13172 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13173 0x00000000, 0xffffffff },
13174 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13175 0x00000000, 0x000000ff },
13176 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13177 0x00000000, 0xffffffff },
13178 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13179 0x00000000, 0xffffffff },
13180 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13181 0x00000000, 0xffffffff },
13182 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13183 0x00000000, 0x000000ff },
13184 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13185 0x00000000, 0xffffffff },
13186 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13187 0x00000000, 0x000000ff },
13188 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13189 0x00000000, 0xffffffff },
13190 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13191 0x00000000, 0xffffffff },
13192 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13193 0x00000000, 0xffffffff },
13194 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13195 0x00000000, 0xffffffff },
13196 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13197 0x00000000, 0xffffffff },
13198 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13199 0xffffffff, 0x00000000 },
13200 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13201 0xffffffff, 0x00000000 },
13203 /* Buffer Manager Control Registers. */
13204 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13205 0x00000000, 0x007fff80 },
13206 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13207 0x00000000, 0x007fffff },
13208 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13209 0x00000000, 0x0000003f },
13210 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13211 0x00000000, 0x000001ff },
13212 { BUFMGR_MB_HIGH_WATER, 0x0000,
13213 0x00000000, 0x000001ff },
13214 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13215 0xffffffff, 0x00000000 },
13216 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13217 0xffffffff, 0x00000000 },
13219 /* Mailbox Registers */
13220 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13221 0x00000000, 0x000001ff },
13222 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13223 0x00000000, 0x000001ff },
13224 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13225 0x00000000, 0x000007ff },
13226 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13227 0x00000000, 0x000001ff },
13229 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13232 is_5705 = is_5750 = 0;
13233 if (tg3_flag(tp, 5705_PLUS)) {
13235 if (tg3_flag(tp, 5750_PLUS))
13239 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13240 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13243 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13246 if (tg3_flag(tp, IS_5788) &&
13247 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13250 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13253 offset = (u32) reg_tbl[i].offset;
13254 read_mask = reg_tbl[i].read_mask;
13255 write_mask = reg_tbl[i].write_mask;
13257 /* Save the original register content */
13258 save_val = tr32(offset);
13260 /* Determine the read-only value. */
13261 read_val = save_val & read_mask;
13263 /* Write zero to the register, then make sure the read-only bits
13264 * are not changed and the read/write bits are all zeros.
13268 val = tr32(offset);
13270 /* Test the read-only and read/write bits. */
13271 if (((val & read_mask) != read_val) || (val & write_mask))
13274 /* Write ones to all the bits defined by RdMask and WrMask, then
13275 * make sure the read-only bits are not changed and the
13276 * read/write bits are all ones.
13278 tw32(offset, read_mask | write_mask);
13280 val = tr32(offset);
13282 /* Test the read-only bits. */
13283 if ((val & read_mask) != read_val)
13286 /* Test the read/write bits. */
13287 if ((val & write_mask) != write_mask)
13290 tw32(offset, save_val);
13296 if (netif_msg_hw(tp))
13297 netdev_err(tp->dev,
13298 "Register test failed at offset %x\n", offset);
13299 tw32(offset, save_val);
13303 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13305 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13309 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13310 for (j = 0; j < len; j += 4) {
13313 tg3_write_mem(tp, offset + j, test_pattern[i]);
13314 tg3_read_mem(tp, offset + j, &val);
13315 if (val != test_pattern[i])
13322 static int tg3_test_memory(struct tg3 *tp)
13324 static struct mem_entry {
13327 } mem_tbl_570x[] = {
13328 { 0x00000000, 0x00b50},
13329 { 0x00002000, 0x1c000},
13330 { 0xffffffff, 0x00000}
13331 }, mem_tbl_5705[] = {
13332 { 0x00000100, 0x0000c},
13333 { 0x00000200, 0x00008},
13334 { 0x00004000, 0x00800},
13335 { 0x00006000, 0x01000},
13336 { 0x00008000, 0x02000},
13337 { 0x00010000, 0x0e000},
13338 { 0xffffffff, 0x00000}
13339 }, mem_tbl_5755[] = {
13340 { 0x00000200, 0x00008},
13341 { 0x00004000, 0x00800},
13342 { 0x00006000, 0x00800},
13343 { 0x00008000, 0x02000},
13344 { 0x00010000, 0x0c000},
13345 { 0xffffffff, 0x00000}
13346 }, mem_tbl_5906[] = {
13347 { 0x00000200, 0x00008},
13348 { 0x00004000, 0x00400},
13349 { 0x00006000, 0x00400},
13350 { 0x00008000, 0x01000},
13351 { 0x00010000, 0x01000},
13352 { 0xffffffff, 0x00000}
13353 }, mem_tbl_5717[] = {
13354 { 0x00000200, 0x00008},
13355 { 0x00010000, 0x0a000},
13356 { 0x00020000, 0x13c00},
13357 { 0xffffffff, 0x00000}
13358 }, mem_tbl_57765[] = {
13359 { 0x00000200, 0x00008},
13360 { 0x00004000, 0x00800},
13361 { 0x00006000, 0x09800},
13362 { 0x00010000, 0x0a000},
13363 { 0xffffffff, 0x00000}
13365 struct mem_entry *mem_tbl;
13369 if (tg3_flag(tp, 5717_PLUS))
13370 mem_tbl = mem_tbl_5717;
13371 else if (tg3_flag(tp, 57765_CLASS) ||
13372 tg3_asic_rev(tp) == ASIC_REV_5762)
13373 mem_tbl = mem_tbl_57765;
13374 else if (tg3_flag(tp, 5755_PLUS))
13375 mem_tbl = mem_tbl_5755;
13376 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13377 mem_tbl = mem_tbl_5906;
13378 else if (tg3_flag(tp, 5705_PLUS))
13379 mem_tbl = mem_tbl_5705;
13381 mem_tbl = mem_tbl_570x;
13383 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13384 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13392 #define TG3_TSO_MSS 500
13394 #define TG3_TSO_IP_HDR_LEN 20
13395 #define TG3_TSO_TCP_HDR_LEN 20
13396 #define TG3_TSO_TCP_OPT_LEN 12
13398 static const u8 tg3_tso_header[] = {
13400 0x45, 0x00, 0x00, 0x00,
13401 0x00, 0x00, 0x40, 0x00,
13402 0x40, 0x06, 0x00, 0x00,
13403 0x0a, 0x00, 0x00, 0x01,
13404 0x0a, 0x00, 0x00, 0x02,
13405 0x0d, 0x00, 0xe0, 0x00,
13406 0x00, 0x00, 0x01, 0x00,
13407 0x00, 0x00, 0x02, 0x00,
13408 0x80, 0x10, 0x10, 0x00,
13409 0x14, 0x09, 0x00, 0x00,
13410 0x01, 0x01, 0x08, 0x0a,
13411 0x11, 0x11, 0x11, 0x11,
13412 0x11, 0x11, 0x11, 0x11,
13415 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13417 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13418 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13420 struct sk_buff *skb;
13421 u8 *tx_data, *rx_data;
13423 int num_pkts, tx_len, rx_len, i, err;
13424 struct tg3_rx_buffer_desc *desc;
13425 struct tg3_napi *tnapi, *rnapi;
13426 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13428 tnapi = &tp->napi[0];
13429 rnapi = &tp->napi[0];
13430 if (tp->irq_cnt > 1) {
13431 if (tg3_flag(tp, ENABLE_RSS))
13432 rnapi = &tp->napi[1];
13433 if (tg3_flag(tp, ENABLE_TSS))
13434 tnapi = &tp->napi[1];
13436 coal_now = tnapi->coal_now | rnapi->coal_now;
13441 skb = netdev_alloc_skb(tp->dev, tx_len);
13445 tx_data = skb_put(skb, tx_len);
13446 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13447 memset(tx_data + ETH_ALEN, 0x0, 8);
13449 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13451 if (tso_loopback) {
13452 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13454 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13455 TG3_TSO_TCP_OPT_LEN;
13457 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13458 sizeof(tg3_tso_header));
13461 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13462 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13464 /* Set the total length field in the IP header */
13465 iph->tot_len = htons((u16)(mss + hdr_len));
13467 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13468 TXD_FLAG_CPU_POST_DMA);
13470 if (tg3_flag(tp, HW_TSO_1) ||
13471 tg3_flag(tp, HW_TSO_2) ||
13472 tg3_flag(tp, HW_TSO_3)) {
13474 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13475 th = (struct tcphdr *)&tx_data[val];
13478 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13480 if (tg3_flag(tp, HW_TSO_3)) {
13481 mss |= (hdr_len & 0xc) << 12;
13482 if (hdr_len & 0x10)
13483 base_flags |= 0x00000010;
13484 base_flags |= (hdr_len & 0x3e0) << 5;
13485 } else if (tg3_flag(tp, HW_TSO_2))
13486 mss |= hdr_len << 9;
13487 else if (tg3_flag(tp, HW_TSO_1) ||
13488 tg3_asic_rev(tp) == ASIC_REV_5705) {
13489 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13491 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13494 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13497 data_off = ETH_HLEN;
13499 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13500 tx_len > VLAN_ETH_FRAME_LEN)
13501 base_flags |= TXD_FLAG_JMB_PKT;
13504 for (i = data_off; i < tx_len; i++)
13505 tx_data[i] = (u8) (i & 0xff);
13507 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13508 if (pci_dma_mapping_error(tp->pdev, map)) {
13509 dev_kfree_skb(skb);
13513 val = tnapi->tx_prod;
13514 tnapi->tx_buffers[val].skb = skb;
13515 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13517 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13522 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13524 budget = tg3_tx_avail(tnapi);
13525 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13526 base_flags | TXD_FLAG_END, mss, 0)) {
13527 tnapi->tx_buffers[val].skb = NULL;
13528 dev_kfree_skb(skb);
13534 /* Sync BD data before updating mailbox */
13537 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13538 tr32_mailbox(tnapi->prodmbox);
13542 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13543 for (i = 0; i < 35; i++) {
13544 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13549 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13550 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13551 if ((tx_idx == tnapi->tx_prod) &&
13552 (rx_idx == (rx_start_idx + num_pkts)))
13556 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13557 dev_kfree_skb(skb);
13559 if (tx_idx != tnapi->tx_prod)
13562 if (rx_idx != rx_start_idx + num_pkts)
13566 while (rx_idx != rx_start_idx) {
13567 desc = &rnapi->rx_rcb[rx_start_idx++];
13568 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13569 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13571 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13572 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13575 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13578 if (!tso_loopback) {
13579 if (rx_len != tx_len)
13582 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13583 if (opaque_key != RXD_OPAQUE_RING_STD)
13586 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13589 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13590 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13591 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13595 if (opaque_key == RXD_OPAQUE_RING_STD) {
13596 rx_data = tpr->rx_std_buffers[desc_idx].data;
13597 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13599 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13600 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13601 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13606 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13607 PCI_DMA_FROMDEVICE);
13609 rx_data += TG3_RX_OFFSET(tp);
13610 for (i = data_off; i < rx_len; i++, val++) {
13611 if (*(rx_data + i) != (u8) (val & 0xff))
13618 /* tg3_free_rings will unmap and free the rx_data */
13623 #define TG3_STD_LOOPBACK_FAILED 1
13624 #define TG3_JMB_LOOPBACK_FAILED 2
13625 #define TG3_TSO_LOOPBACK_FAILED 4
13626 #define TG3_LOOPBACK_FAILED \
13627 (TG3_STD_LOOPBACK_FAILED | \
13628 TG3_JMB_LOOPBACK_FAILED | \
13629 TG3_TSO_LOOPBACK_FAILED)
13631 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13635 u32 jmb_pkt_sz = 9000;
13638 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13640 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13641 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13643 if (!netif_running(tp->dev)) {
13644 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13645 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13647 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13651 err = tg3_reset_hw(tp, true);
13653 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13654 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13656 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13660 if (tg3_flag(tp, ENABLE_RSS)) {
13663 /* Reroute all rx packets to the 1st queue */
13664 for (i = MAC_RSS_INDIR_TBL_0;
13665 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13669 /* HW errata - mac loopback fails in some cases on 5780.
13670 * Normal traffic and PHY loopback are not affected by
13671 * errata. Also, the MAC loopback test is deprecated for
13672 * all newer ASIC revisions.
13674 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13675 !tg3_flag(tp, CPMU_PRESENT)) {
13676 tg3_mac_loopback(tp, true);
13678 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13679 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13681 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13682 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13683 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13685 tg3_mac_loopback(tp, false);
13688 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13689 !tg3_flag(tp, USE_PHYLIB)) {
13692 tg3_phy_lpbk_set(tp, 0, false);
13694 /* Wait for link */
13695 for (i = 0; i < 100; i++) {
13696 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13701 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13702 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13703 if (tg3_flag(tp, TSO_CAPABLE) &&
13704 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13705 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13706 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13707 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13708 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13711 tg3_phy_lpbk_set(tp, 0, true);
13713 /* All link indications report up, but the hardware
13714 * isn't really ready for about 20 msec. Double it
13719 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13720 data[TG3_EXT_LOOPB_TEST] |=
13721 TG3_STD_LOOPBACK_FAILED;
13722 if (tg3_flag(tp, TSO_CAPABLE) &&
13723 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13724 data[TG3_EXT_LOOPB_TEST] |=
13725 TG3_TSO_LOOPBACK_FAILED;
13726 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13727 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13728 data[TG3_EXT_LOOPB_TEST] |=
13729 TG3_JMB_LOOPBACK_FAILED;
13732 /* Re-enable gphy autopowerdown. */
13733 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13734 tg3_phy_toggle_apd(tp, true);
13737 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13738 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13741 tp->phy_flags |= eee_cap;
13746 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13749 struct tg3 *tp = netdev_priv(dev);
13750 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13752 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13753 if (tg3_power_up(tp)) {
13754 etest->flags |= ETH_TEST_FL_FAILED;
13755 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13758 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13761 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13763 if (tg3_test_nvram(tp) != 0) {
13764 etest->flags |= ETH_TEST_FL_FAILED;
13765 data[TG3_NVRAM_TEST] = 1;
13767 if (!doextlpbk && tg3_test_link(tp)) {
13768 etest->flags |= ETH_TEST_FL_FAILED;
13769 data[TG3_LINK_TEST] = 1;
13771 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13772 int err, err2 = 0, irq_sync = 0;
13774 if (netif_running(dev)) {
13776 tg3_netif_stop(tp);
13780 tg3_full_lock(tp, irq_sync);
13781 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13782 err = tg3_nvram_lock(tp);
13783 tg3_halt_cpu(tp, RX_CPU_BASE);
13784 if (!tg3_flag(tp, 5705_PLUS))
13785 tg3_halt_cpu(tp, TX_CPU_BASE);
13787 tg3_nvram_unlock(tp);
13789 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13792 if (tg3_test_registers(tp) != 0) {
13793 etest->flags |= ETH_TEST_FL_FAILED;
13794 data[TG3_REGISTER_TEST] = 1;
13797 if (tg3_test_memory(tp) != 0) {
13798 etest->flags |= ETH_TEST_FL_FAILED;
13799 data[TG3_MEMORY_TEST] = 1;
13803 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13805 if (tg3_test_loopback(tp, data, doextlpbk))
13806 etest->flags |= ETH_TEST_FL_FAILED;
13808 tg3_full_unlock(tp);
13810 if (tg3_test_interrupt(tp) != 0) {
13811 etest->flags |= ETH_TEST_FL_FAILED;
13812 data[TG3_INTERRUPT_TEST] = 1;
13815 tg3_full_lock(tp, 0);
13817 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13818 if (netif_running(dev)) {
13819 tg3_flag_set(tp, INIT_COMPLETE);
13820 err2 = tg3_restart_hw(tp, true);
13822 tg3_netif_start(tp);
13825 tg3_full_unlock(tp);
13827 if (irq_sync && !err2)
13830 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13831 tg3_power_down_prepare(tp);
13835 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13837 struct tg3 *tp = netdev_priv(dev);
13838 struct hwtstamp_config stmpconf;
13840 if (!tg3_flag(tp, PTP_CAPABLE))
13841 return -EOPNOTSUPP;
13843 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13846 if (stmpconf.flags)
13849 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13850 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13853 switch (stmpconf.rx_filter) {
13854 case HWTSTAMP_FILTER_NONE:
13857 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13858 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13859 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13861 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13862 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13863 TG3_RX_PTP_CTL_SYNC_EVNT;
13865 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13866 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13867 TG3_RX_PTP_CTL_DELAY_REQ;
13869 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13870 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13871 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13873 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13874 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13875 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13877 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13878 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13879 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13881 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13882 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13883 TG3_RX_PTP_CTL_SYNC_EVNT;
13885 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13886 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13887 TG3_RX_PTP_CTL_SYNC_EVNT;
13889 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13890 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13891 TG3_RX_PTP_CTL_SYNC_EVNT;
13893 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13894 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13895 TG3_RX_PTP_CTL_DELAY_REQ;
13897 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13898 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13899 TG3_RX_PTP_CTL_DELAY_REQ;
13901 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13902 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13903 TG3_RX_PTP_CTL_DELAY_REQ;
13909 if (netif_running(dev) && tp->rxptpctl)
13910 tw32(TG3_RX_PTP_CTL,
13911 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13913 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13914 tg3_flag_set(tp, TX_TSTAMP_EN);
13916 tg3_flag_clear(tp, TX_TSTAMP_EN);
13918 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13922 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13924 struct tg3 *tp = netdev_priv(dev);
13925 struct hwtstamp_config stmpconf;
13927 if (!tg3_flag(tp, PTP_CAPABLE))
13928 return -EOPNOTSUPP;
13930 stmpconf.flags = 0;
13931 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13932 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13934 switch (tp->rxptpctl) {
13936 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13938 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13939 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13941 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13942 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13944 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13945 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13947 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13948 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13950 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13951 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13953 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13954 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13956 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13957 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13959 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13960 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13962 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13963 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13965 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13966 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13968 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13969 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13971 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13972 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13979 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13983 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13985 struct mii_ioctl_data *data = if_mii(ifr);
13986 struct tg3 *tp = netdev_priv(dev);
13989 if (tg3_flag(tp, USE_PHYLIB)) {
13990 struct phy_device *phydev;
13991 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13993 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13994 return phy_mii_ioctl(phydev, ifr, cmd);
13999 data->phy_id = tp->phy_addr;
14002 case SIOCGMIIREG: {
14005 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14006 break; /* We have no PHY */
14008 if (!netif_running(dev))
14011 spin_lock_bh(&tp->lock);
14012 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14013 data->reg_num & 0x1f, &mii_regval);
14014 spin_unlock_bh(&tp->lock);
14016 data->val_out = mii_regval;
14022 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14023 break; /* We have no PHY */
14025 if (!netif_running(dev))
14028 spin_lock_bh(&tp->lock);
14029 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14030 data->reg_num & 0x1f, data->val_in);
14031 spin_unlock_bh(&tp->lock);
14035 case SIOCSHWTSTAMP:
14036 return tg3_hwtstamp_set(dev, ifr);
14038 case SIOCGHWTSTAMP:
14039 return tg3_hwtstamp_get(dev, ifr);
14045 return -EOPNOTSUPP;
14048 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14050 struct tg3 *tp = netdev_priv(dev);
14052 memcpy(ec, &tp->coal, sizeof(*ec));
14056 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14058 struct tg3 *tp = netdev_priv(dev);
14059 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14060 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14062 if (!tg3_flag(tp, 5705_PLUS)) {
14063 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14064 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14065 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14066 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14069 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14070 (!ec->rx_coalesce_usecs) ||
14071 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14072 (!ec->tx_coalesce_usecs) ||
14073 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14074 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14075 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14076 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14077 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14078 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14079 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14080 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14083 /* Only copy relevant parameters, ignore all others. */
14084 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14085 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14086 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14087 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14088 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14089 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14090 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14091 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14092 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14094 if (netif_running(dev)) {
14095 tg3_full_lock(tp, 0);
14096 __tg3_set_coalesce(tp, &tp->coal);
14097 tg3_full_unlock(tp);
14102 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14104 struct tg3 *tp = netdev_priv(dev);
14106 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14107 netdev_warn(tp->dev, "Board does not support EEE!\n");
14108 return -EOPNOTSUPP;
14111 if (edata->advertised != tp->eee.advertised) {
14112 netdev_warn(tp->dev,
14113 "Direct manipulation of EEE advertisement is not supported\n");
14117 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14118 netdev_warn(tp->dev,
14119 "Maximal Tx Lpi timer supported is %#x(u)\n",
14120 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14126 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14127 tg3_warn_mgmt_link_flap(tp);
14129 if (netif_running(tp->dev)) {
14130 tg3_full_lock(tp, 0);
14133 tg3_full_unlock(tp);
14139 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14141 struct tg3 *tp = netdev_priv(dev);
14143 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14144 netdev_warn(tp->dev,
14145 "Board does not support EEE!\n");
14146 return -EOPNOTSUPP;
14153 static const struct ethtool_ops tg3_ethtool_ops = {
14154 .get_drvinfo = tg3_get_drvinfo,
14155 .get_regs_len = tg3_get_regs_len,
14156 .get_regs = tg3_get_regs,
14157 .get_wol = tg3_get_wol,
14158 .set_wol = tg3_set_wol,
14159 .get_msglevel = tg3_get_msglevel,
14160 .set_msglevel = tg3_set_msglevel,
14161 .nway_reset = tg3_nway_reset,
14162 .get_link = ethtool_op_get_link,
14163 .get_eeprom_len = tg3_get_eeprom_len,
14164 .get_eeprom = tg3_get_eeprom,
14165 .set_eeprom = tg3_set_eeprom,
14166 .get_ringparam = tg3_get_ringparam,
14167 .set_ringparam = tg3_set_ringparam,
14168 .get_pauseparam = tg3_get_pauseparam,
14169 .set_pauseparam = tg3_set_pauseparam,
14170 .self_test = tg3_self_test,
14171 .get_strings = tg3_get_strings,
14172 .set_phys_id = tg3_set_phys_id,
14173 .get_ethtool_stats = tg3_get_ethtool_stats,
14174 .get_coalesce = tg3_get_coalesce,
14175 .set_coalesce = tg3_set_coalesce,
14176 .get_sset_count = tg3_get_sset_count,
14177 .get_rxnfc = tg3_get_rxnfc,
14178 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14179 .get_rxfh = tg3_get_rxfh,
14180 .set_rxfh = tg3_set_rxfh,
14181 .get_channels = tg3_get_channels,
14182 .set_channels = tg3_set_channels,
14183 .get_ts_info = tg3_get_ts_info,
14184 .get_eee = tg3_get_eee,
14185 .set_eee = tg3_set_eee,
14186 .get_link_ksettings = tg3_get_link_ksettings,
14187 .set_link_ksettings = tg3_set_link_ksettings,
14190 static void tg3_get_stats64(struct net_device *dev,
14191 struct rtnl_link_stats64 *stats)
14193 struct tg3 *tp = netdev_priv(dev);
14195 spin_lock_bh(&tp->lock);
14196 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14197 *stats = tp->net_stats_prev;
14198 spin_unlock_bh(&tp->lock);
14202 tg3_get_nstats(tp, stats);
14203 spin_unlock_bh(&tp->lock);
14206 static void tg3_set_rx_mode(struct net_device *dev)
14208 struct tg3 *tp = netdev_priv(dev);
14210 if (!netif_running(dev))
14213 tg3_full_lock(tp, 0);
14214 __tg3_set_rx_mode(dev);
14215 tg3_full_unlock(tp);
14218 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14221 dev->mtu = new_mtu;
14223 if (new_mtu > ETH_DATA_LEN) {
14224 if (tg3_flag(tp, 5780_CLASS)) {
14225 netdev_update_features(dev);
14226 tg3_flag_clear(tp, TSO_CAPABLE);
14228 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14231 if (tg3_flag(tp, 5780_CLASS)) {
14232 tg3_flag_set(tp, TSO_CAPABLE);
14233 netdev_update_features(dev);
14235 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14239 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14241 struct tg3 *tp = netdev_priv(dev);
14243 bool reset_phy = false;
14245 if (!netif_running(dev)) {
14246 /* We'll just catch it later when the
14249 tg3_set_mtu(dev, tp, new_mtu);
14255 tg3_netif_stop(tp);
14257 tg3_set_mtu(dev, tp, new_mtu);
14259 tg3_full_lock(tp, 1);
14261 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14263 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14264 * breaks all requests to 256 bytes.
14266 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14267 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14268 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14269 tg3_asic_rev(tp) == ASIC_REV_5720)
14272 err = tg3_restart_hw(tp, reset_phy);
14275 tg3_netif_start(tp);
14277 tg3_full_unlock(tp);
14285 static const struct net_device_ops tg3_netdev_ops = {
14286 .ndo_open = tg3_open,
14287 .ndo_stop = tg3_close,
14288 .ndo_start_xmit = tg3_start_xmit,
14289 .ndo_get_stats64 = tg3_get_stats64,
14290 .ndo_validate_addr = eth_validate_addr,
14291 .ndo_set_rx_mode = tg3_set_rx_mode,
14292 .ndo_set_mac_address = tg3_set_mac_addr,
14293 .ndo_do_ioctl = tg3_ioctl,
14294 .ndo_tx_timeout = tg3_tx_timeout,
14295 .ndo_change_mtu = tg3_change_mtu,
14296 .ndo_fix_features = tg3_fix_features,
14297 .ndo_set_features = tg3_set_features,
14298 #ifdef CONFIG_NET_POLL_CONTROLLER
14299 .ndo_poll_controller = tg3_poll_controller,
14303 static void tg3_get_eeprom_size(struct tg3 *tp)
14305 u32 cursize, val, magic;
14307 tp->nvram_size = EEPROM_CHIP_SIZE;
14309 if (tg3_nvram_read(tp, 0, &magic) != 0)
14312 if ((magic != TG3_EEPROM_MAGIC) &&
14313 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14314 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14318 * Size the chip by reading offsets at increasing powers of two.
14319 * When we encounter our validation signature, we know the addressing
14320 * has wrapped around, and thus have our chip size.
14324 while (cursize < tp->nvram_size) {
14325 if (tg3_nvram_read(tp, cursize, &val) != 0)
14334 tp->nvram_size = cursize;
14337 static void tg3_get_nvram_size(struct tg3 *tp)
14341 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14344 /* Selfboot format */
14345 if (val != TG3_EEPROM_MAGIC) {
14346 tg3_get_eeprom_size(tp);
14350 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14352 /* This is confusing. We want to operate on the
14353 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14354 * call will read from NVRAM and byteswap the data
14355 * according to the byteswapping settings for all
14356 * other register accesses. This ensures the data we
14357 * want will always reside in the lower 16-bits.
14358 * However, the data in NVRAM is in LE format, which
14359 * means the data from the NVRAM read will always be
14360 * opposite the endianness of the CPU. The 16-bit
14361 * byteswap then brings the data to CPU endianness.
14363 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14367 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14370 static void tg3_get_nvram_info(struct tg3 *tp)
14374 nvcfg1 = tr32(NVRAM_CFG1);
14375 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14376 tg3_flag_set(tp, FLASH);
14378 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14379 tw32(NVRAM_CFG1, nvcfg1);
14382 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14383 tg3_flag(tp, 5780_CLASS)) {
14384 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14385 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14386 tp->nvram_jedecnum = JEDEC_ATMEL;
14387 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14388 tg3_flag_set(tp, NVRAM_BUFFERED);
14390 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14391 tp->nvram_jedecnum = JEDEC_ATMEL;
14392 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14394 case FLASH_VENDOR_ATMEL_EEPROM:
14395 tp->nvram_jedecnum = JEDEC_ATMEL;
14396 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14397 tg3_flag_set(tp, NVRAM_BUFFERED);
14399 case FLASH_VENDOR_ST:
14400 tp->nvram_jedecnum = JEDEC_ST;
14401 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14402 tg3_flag_set(tp, NVRAM_BUFFERED);
14404 case FLASH_VENDOR_SAIFUN:
14405 tp->nvram_jedecnum = JEDEC_SAIFUN;
14406 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14408 case FLASH_VENDOR_SST_SMALL:
14409 case FLASH_VENDOR_SST_LARGE:
14410 tp->nvram_jedecnum = JEDEC_SST;
14411 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14415 tp->nvram_jedecnum = JEDEC_ATMEL;
14416 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14417 tg3_flag_set(tp, NVRAM_BUFFERED);
14421 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14423 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14424 case FLASH_5752PAGE_SIZE_256:
14425 tp->nvram_pagesize = 256;
14427 case FLASH_5752PAGE_SIZE_512:
14428 tp->nvram_pagesize = 512;
14430 case FLASH_5752PAGE_SIZE_1K:
14431 tp->nvram_pagesize = 1024;
14433 case FLASH_5752PAGE_SIZE_2K:
14434 tp->nvram_pagesize = 2048;
14436 case FLASH_5752PAGE_SIZE_4K:
14437 tp->nvram_pagesize = 4096;
14439 case FLASH_5752PAGE_SIZE_264:
14440 tp->nvram_pagesize = 264;
14442 case FLASH_5752PAGE_SIZE_528:
14443 tp->nvram_pagesize = 528;
14448 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14452 nvcfg1 = tr32(NVRAM_CFG1);
14454 /* NVRAM protection for TPM */
14455 if (nvcfg1 & (1 << 27))
14456 tg3_flag_set(tp, PROTECTED_NVRAM);
14458 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14459 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14460 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14461 tp->nvram_jedecnum = JEDEC_ATMEL;
14462 tg3_flag_set(tp, NVRAM_BUFFERED);
14464 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14465 tp->nvram_jedecnum = JEDEC_ATMEL;
14466 tg3_flag_set(tp, NVRAM_BUFFERED);
14467 tg3_flag_set(tp, FLASH);
14469 case FLASH_5752VENDOR_ST_M45PE10:
14470 case FLASH_5752VENDOR_ST_M45PE20:
14471 case FLASH_5752VENDOR_ST_M45PE40:
14472 tp->nvram_jedecnum = JEDEC_ST;
14473 tg3_flag_set(tp, NVRAM_BUFFERED);
14474 tg3_flag_set(tp, FLASH);
14478 if (tg3_flag(tp, FLASH)) {
14479 tg3_nvram_get_pagesize(tp, nvcfg1);
14481 /* For eeprom, set pagesize to maximum eeprom size */
14482 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14484 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14485 tw32(NVRAM_CFG1, nvcfg1);
14489 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14491 u32 nvcfg1, protect = 0;
14493 nvcfg1 = tr32(NVRAM_CFG1);
14495 /* NVRAM protection for TPM */
14496 if (nvcfg1 & (1 << 27)) {
14497 tg3_flag_set(tp, PROTECTED_NVRAM);
14501 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14503 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14504 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14505 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14506 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14507 tp->nvram_jedecnum = JEDEC_ATMEL;
14508 tg3_flag_set(tp, NVRAM_BUFFERED);
14509 tg3_flag_set(tp, FLASH);
14510 tp->nvram_pagesize = 264;
14511 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14512 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14513 tp->nvram_size = (protect ? 0x3e200 :
14514 TG3_NVRAM_SIZE_512KB);
14515 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14516 tp->nvram_size = (protect ? 0x1f200 :
14517 TG3_NVRAM_SIZE_256KB);
14519 tp->nvram_size = (protect ? 0x1f200 :
14520 TG3_NVRAM_SIZE_128KB);
14522 case FLASH_5752VENDOR_ST_M45PE10:
14523 case FLASH_5752VENDOR_ST_M45PE20:
14524 case FLASH_5752VENDOR_ST_M45PE40:
14525 tp->nvram_jedecnum = JEDEC_ST;
14526 tg3_flag_set(tp, NVRAM_BUFFERED);
14527 tg3_flag_set(tp, FLASH);
14528 tp->nvram_pagesize = 256;
14529 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14530 tp->nvram_size = (protect ?
14531 TG3_NVRAM_SIZE_64KB :
14532 TG3_NVRAM_SIZE_128KB);
14533 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14534 tp->nvram_size = (protect ?
14535 TG3_NVRAM_SIZE_64KB :
14536 TG3_NVRAM_SIZE_256KB);
14538 tp->nvram_size = (protect ?
14539 TG3_NVRAM_SIZE_128KB :
14540 TG3_NVRAM_SIZE_512KB);
14545 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14549 nvcfg1 = tr32(NVRAM_CFG1);
14551 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14552 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14553 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14554 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14555 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14556 tp->nvram_jedecnum = JEDEC_ATMEL;
14557 tg3_flag_set(tp, NVRAM_BUFFERED);
14558 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14560 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14561 tw32(NVRAM_CFG1, nvcfg1);
14563 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14564 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14565 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14566 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14567 tp->nvram_jedecnum = JEDEC_ATMEL;
14568 tg3_flag_set(tp, NVRAM_BUFFERED);
14569 tg3_flag_set(tp, FLASH);
14570 tp->nvram_pagesize = 264;
14572 case FLASH_5752VENDOR_ST_M45PE10:
14573 case FLASH_5752VENDOR_ST_M45PE20:
14574 case FLASH_5752VENDOR_ST_M45PE40:
14575 tp->nvram_jedecnum = JEDEC_ST;
14576 tg3_flag_set(tp, NVRAM_BUFFERED);
14577 tg3_flag_set(tp, FLASH);
14578 tp->nvram_pagesize = 256;
14583 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14585 u32 nvcfg1, protect = 0;
14587 nvcfg1 = tr32(NVRAM_CFG1);
14589 /* NVRAM protection for TPM */
14590 if (nvcfg1 & (1 << 27)) {
14591 tg3_flag_set(tp, PROTECTED_NVRAM);
14595 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14597 case FLASH_5761VENDOR_ATMEL_ADB021D:
14598 case FLASH_5761VENDOR_ATMEL_ADB041D:
14599 case FLASH_5761VENDOR_ATMEL_ADB081D:
14600 case FLASH_5761VENDOR_ATMEL_ADB161D:
14601 case FLASH_5761VENDOR_ATMEL_MDB021D:
14602 case FLASH_5761VENDOR_ATMEL_MDB041D:
14603 case FLASH_5761VENDOR_ATMEL_MDB081D:
14604 case FLASH_5761VENDOR_ATMEL_MDB161D:
14605 tp->nvram_jedecnum = JEDEC_ATMEL;
14606 tg3_flag_set(tp, NVRAM_BUFFERED);
14607 tg3_flag_set(tp, FLASH);
14608 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14609 tp->nvram_pagesize = 256;
14611 case FLASH_5761VENDOR_ST_A_M45PE20:
14612 case FLASH_5761VENDOR_ST_A_M45PE40:
14613 case FLASH_5761VENDOR_ST_A_M45PE80:
14614 case FLASH_5761VENDOR_ST_A_M45PE16:
14615 case FLASH_5761VENDOR_ST_M_M45PE20:
14616 case FLASH_5761VENDOR_ST_M_M45PE40:
14617 case FLASH_5761VENDOR_ST_M_M45PE80:
14618 case FLASH_5761VENDOR_ST_M_M45PE16:
14619 tp->nvram_jedecnum = JEDEC_ST;
14620 tg3_flag_set(tp, NVRAM_BUFFERED);
14621 tg3_flag_set(tp, FLASH);
14622 tp->nvram_pagesize = 256;
14627 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14630 case FLASH_5761VENDOR_ATMEL_ADB161D:
14631 case FLASH_5761VENDOR_ATMEL_MDB161D:
14632 case FLASH_5761VENDOR_ST_A_M45PE16:
14633 case FLASH_5761VENDOR_ST_M_M45PE16:
14634 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14636 case FLASH_5761VENDOR_ATMEL_ADB081D:
14637 case FLASH_5761VENDOR_ATMEL_MDB081D:
14638 case FLASH_5761VENDOR_ST_A_M45PE80:
14639 case FLASH_5761VENDOR_ST_M_M45PE80:
14640 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14642 case FLASH_5761VENDOR_ATMEL_ADB041D:
14643 case FLASH_5761VENDOR_ATMEL_MDB041D:
14644 case FLASH_5761VENDOR_ST_A_M45PE40:
14645 case FLASH_5761VENDOR_ST_M_M45PE40:
14646 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14648 case FLASH_5761VENDOR_ATMEL_ADB021D:
14649 case FLASH_5761VENDOR_ATMEL_MDB021D:
14650 case FLASH_5761VENDOR_ST_A_M45PE20:
14651 case FLASH_5761VENDOR_ST_M_M45PE20:
14652 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14658 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14660 tp->nvram_jedecnum = JEDEC_ATMEL;
14661 tg3_flag_set(tp, NVRAM_BUFFERED);
14662 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14665 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14669 nvcfg1 = tr32(NVRAM_CFG1);
14671 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14672 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14673 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14674 tp->nvram_jedecnum = JEDEC_ATMEL;
14675 tg3_flag_set(tp, NVRAM_BUFFERED);
14676 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14678 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14679 tw32(NVRAM_CFG1, nvcfg1);
14681 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14682 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14683 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14684 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14685 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14686 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14687 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14688 tp->nvram_jedecnum = JEDEC_ATMEL;
14689 tg3_flag_set(tp, NVRAM_BUFFERED);
14690 tg3_flag_set(tp, FLASH);
14692 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14693 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14694 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14695 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14696 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14698 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14699 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14700 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14702 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14703 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14704 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14708 case FLASH_5752VENDOR_ST_M45PE10:
14709 case FLASH_5752VENDOR_ST_M45PE20:
14710 case FLASH_5752VENDOR_ST_M45PE40:
14711 tp->nvram_jedecnum = JEDEC_ST;
14712 tg3_flag_set(tp, NVRAM_BUFFERED);
14713 tg3_flag_set(tp, FLASH);
14715 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14716 case FLASH_5752VENDOR_ST_M45PE10:
14717 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14719 case FLASH_5752VENDOR_ST_M45PE20:
14720 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14722 case FLASH_5752VENDOR_ST_M45PE40:
14723 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14728 tg3_flag_set(tp, NO_NVRAM);
14732 tg3_nvram_get_pagesize(tp, nvcfg1);
14733 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14734 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14738 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14742 nvcfg1 = tr32(NVRAM_CFG1);
14744 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14745 case FLASH_5717VENDOR_ATMEL_EEPROM:
14746 case FLASH_5717VENDOR_MICRO_EEPROM:
14747 tp->nvram_jedecnum = JEDEC_ATMEL;
14748 tg3_flag_set(tp, NVRAM_BUFFERED);
14749 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14751 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14752 tw32(NVRAM_CFG1, nvcfg1);
14754 case FLASH_5717VENDOR_ATMEL_MDB011D:
14755 case FLASH_5717VENDOR_ATMEL_ADB011B:
14756 case FLASH_5717VENDOR_ATMEL_ADB011D:
14757 case FLASH_5717VENDOR_ATMEL_MDB021D:
14758 case FLASH_5717VENDOR_ATMEL_ADB021B:
14759 case FLASH_5717VENDOR_ATMEL_ADB021D:
14760 case FLASH_5717VENDOR_ATMEL_45USPT:
14761 tp->nvram_jedecnum = JEDEC_ATMEL;
14762 tg3_flag_set(tp, NVRAM_BUFFERED);
14763 tg3_flag_set(tp, FLASH);
14765 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14766 case FLASH_5717VENDOR_ATMEL_MDB021D:
14767 /* Detect size with tg3_nvram_get_size() */
14769 case FLASH_5717VENDOR_ATMEL_ADB021B:
14770 case FLASH_5717VENDOR_ATMEL_ADB021D:
14771 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14774 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14778 case FLASH_5717VENDOR_ST_M_M25PE10:
14779 case FLASH_5717VENDOR_ST_A_M25PE10:
14780 case FLASH_5717VENDOR_ST_M_M45PE10:
14781 case FLASH_5717VENDOR_ST_A_M45PE10:
14782 case FLASH_5717VENDOR_ST_M_M25PE20:
14783 case FLASH_5717VENDOR_ST_A_M25PE20:
14784 case FLASH_5717VENDOR_ST_M_M45PE20:
14785 case FLASH_5717VENDOR_ST_A_M45PE20:
14786 case FLASH_5717VENDOR_ST_25USPT:
14787 case FLASH_5717VENDOR_ST_45USPT:
14788 tp->nvram_jedecnum = JEDEC_ST;
14789 tg3_flag_set(tp, NVRAM_BUFFERED);
14790 tg3_flag_set(tp, FLASH);
14792 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14793 case FLASH_5717VENDOR_ST_M_M25PE20:
14794 case FLASH_5717VENDOR_ST_M_M45PE20:
14795 /* Detect size with tg3_nvram_get_size() */
14797 case FLASH_5717VENDOR_ST_A_M25PE20:
14798 case FLASH_5717VENDOR_ST_A_M45PE20:
14799 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14802 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14807 tg3_flag_set(tp, NO_NVRAM);
14811 tg3_nvram_get_pagesize(tp, nvcfg1);
14812 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14813 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14816 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14818 u32 nvcfg1, nvmpinstrp, nv_status;
14820 nvcfg1 = tr32(NVRAM_CFG1);
14821 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14823 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14824 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14825 tg3_flag_set(tp, NO_NVRAM);
14829 switch (nvmpinstrp) {
14830 case FLASH_5762_MX25L_100:
14831 case FLASH_5762_MX25L_200:
14832 case FLASH_5762_MX25L_400:
14833 case FLASH_5762_MX25L_800:
14834 case FLASH_5762_MX25L_160_320:
14835 tp->nvram_pagesize = 4096;
14836 tp->nvram_jedecnum = JEDEC_MACRONIX;
14837 tg3_flag_set(tp, NVRAM_BUFFERED);
14838 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14839 tg3_flag_set(tp, FLASH);
14840 nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14842 (1 << (nv_status >> AUTOSENSE_DEVID &
14843 AUTOSENSE_DEVID_MASK)
14844 << AUTOSENSE_SIZE_IN_MB);
14847 case FLASH_5762_EEPROM_HD:
14848 nvmpinstrp = FLASH_5720_EEPROM_HD;
14850 case FLASH_5762_EEPROM_LD:
14851 nvmpinstrp = FLASH_5720_EEPROM_LD;
14853 case FLASH_5720VENDOR_M_ST_M45PE20:
14854 /* This pinstrap supports multiple sizes, so force it
14855 * to read the actual size from location 0xf0.
14857 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14862 switch (nvmpinstrp) {
14863 case FLASH_5720_EEPROM_HD:
14864 case FLASH_5720_EEPROM_LD:
14865 tp->nvram_jedecnum = JEDEC_ATMEL;
14866 tg3_flag_set(tp, NVRAM_BUFFERED);
14868 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14869 tw32(NVRAM_CFG1, nvcfg1);
14870 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14871 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14873 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14875 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14876 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14877 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14878 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14879 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14880 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14881 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14882 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14883 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14884 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14885 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14886 case FLASH_5720VENDOR_ATMEL_45USPT:
14887 tp->nvram_jedecnum = JEDEC_ATMEL;
14888 tg3_flag_set(tp, NVRAM_BUFFERED);
14889 tg3_flag_set(tp, FLASH);
14891 switch (nvmpinstrp) {
14892 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14893 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14894 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14895 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14897 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14898 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14899 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14900 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14902 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14903 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14904 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14907 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14908 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14912 case FLASH_5720VENDOR_M_ST_M25PE10:
14913 case FLASH_5720VENDOR_M_ST_M45PE10:
14914 case FLASH_5720VENDOR_A_ST_M25PE10:
14915 case FLASH_5720VENDOR_A_ST_M45PE10:
14916 case FLASH_5720VENDOR_M_ST_M25PE20:
14917 case FLASH_5720VENDOR_M_ST_M45PE20:
14918 case FLASH_5720VENDOR_A_ST_M25PE20:
14919 case FLASH_5720VENDOR_A_ST_M45PE20:
14920 case FLASH_5720VENDOR_M_ST_M25PE40:
14921 case FLASH_5720VENDOR_M_ST_M45PE40:
14922 case FLASH_5720VENDOR_A_ST_M25PE40:
14923 case FLASH_5720VENDOR_A_ST_M45PE40:
14924 case FLASH_5720VENDOR_M_ST_M25PE80:
14925 case FLASH_5720VENDOR_M_ST_M45PE80:
14926 case FLASH_5720VENDOR_A_ST_M25PE80:
14927 case FLASH_5720VENDOR_A_ST_M45PE80:
14928 case FLASH_5720VENDOR_ST_25USPT:
14929 case FLASH_5720VENDOR_ST_45USPT:
14930 tp->nvram_jedecnum = JEDEC_ST;
14931 tg3_flag_set(tp, NVRAM_BUFFERED);
14932 tg3_flag_set(tp, FLASH);
14934 switch (nvmpinstrp) {
14935 case FLASH_5720VENDOR_M_ST_M25PE20:
14936 case FLASH_5720VENDOR_M_ST_M45PE20:
14937 case FLASH_5720VENDOR_A_ST_M25PE20:
14938 case FLASH_5720VENDOR_A_ST_M45PE20:
14939 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14941 case FLASH_5720VENDOR_M_ST_M25PE40:
14942 case FLASH_5720VENDOR_M_ST_M45PE40:
14943 case FLASH_5720VENDOR_A_ST_M25PE40:
14944 case FLASH_5720VENDOR_A_ST_M45PE40:
14945 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14947 case FLASH_5720VENDOR_M_ST_M25PE80:
14948 case FLASH_5720VENDOR_M_ST_M45PE80:
14949 case FLASH_5720VENDOR_A_ST_M25PE80:
14950 case FLASH_5720VENDOR_A_ST_M45PE80:
14951 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14954 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14955 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14960 tg3_flag_set(tp, NO_NVRAM);
14964 tg3_nvram_get_pagesize(tp, nvcfg1);
14965 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14966 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14968 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14971 if (tg3_nvram_read(tp, 0, &val))
14974 if (val != TG3_EEPROM_MAGIC &&
14975 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14976 tg3_flag_set(tp, NO_NVRAM);
14980 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14981 static void tg3_nvram_init(struct tg3 *tp)
14983 if (tg3_flag(tp, IS_SSB_CORE)) {
14984 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14985 tg3_flag_clear(tp, NVRAM);
14986 tg3_flag_clear(tp, NVRAM_BUFFERED);
14987 tg3_flag_set(tp, NO_NVRAM);
14991 tw32_f(GRC_EEPROM_ADDR,
14992 (EEPROM_ADDR_FSM_RESET |
14993 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14994 EEPROM_ADDR_CLKPERD_SHIFT)));
14998 /* Enable seeprom accesses. */
14999 tw32_f(GRC_LOCAL_CTRL,
15000 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15003 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15004 tg3_asic_rev(tp) != ASIC_REV_5701) {
15005 tg3_flag_set(tp, NVRAM);
15007 if (tg3_nvram_lock(tp)) {
15008 netdev_warn(tp->dev,
15009 "Cannot get nvram lock, %s failed\n",
15013 tg3_enable_nvram_access(tp);
15015 tp->nvram_size = 0;
15017 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15018 tg3_get_5752_nvram_info(tp);
15019 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15020 tg3_get_5755_nvram_info(tp);
15021 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15022 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15023 tg3_asic_rev(tp) == ASIC_REV_5785)
15024 tg3_get_5787_nvram_info(tp);
15025 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15026 tg3_get_5761_nvram_info(tp);
15027 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15028 tg3_get_5906_nvram_info(tp);
15029 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15030 tg3_flag(tp, 57765_CLASS))
15031 tg3_get_57780_nvram_info(tp);
15032 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15033 tg3_asic_rev(tp) == ASIC_REV_5719)
15034 tg3_get_5717_nvram_info(tp);
15035 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15036 tg3_asic_rev(tp) == ASIC_REV_5762)
15037 tg3_get_5720_nvram_info(tp);
15039 tg3_get_nvram_info(tp);
15041 if (tp->nvram_size == 0)
15042 tg3_get_nvram_size(tp);
15044 tg3_disable_nvram_access(tp);
15045 tg3_nvram_unlock(tp);
15048 tg3_flag_clear(tp, NVRAM);
15049 tg3_flag_clear(tp, NVRAM_BUFFERED);
15051 tg3_get_eeprom_size(tp);
15055 struct subsys_tbl_ent {
15056 u16 subsys_vendor, subsys_devid;
15060 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15061 /* Broadcom boards. */
15062 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15063 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15064 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15065 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15066 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15067 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15068 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15069 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15070 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15071 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15072 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15073 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15074 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15075 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15076 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15077 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15078 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15079 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15080 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15081 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15082 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15083 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15086 { TG3PCI_SUBVENDOR_ID_3COM,
15087 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15088 { TG3PCI_SUBVENDOR_ID_3COM,
15089 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15090 { TG3PCI_SUBVENDOR_ID_3COM,
15091 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15092 { TG3PCI_SUBVENDOR_ID_3COM,
15093 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15094 { TG3PCI_SUBVENDOR_ID_3COM,
15095 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15098 { TG3PCI_SUBVENDOR_ID_DELL,
15099 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15100 { TG3PCI_SUBVENDOR_ID_DELL,
15101 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15102 { TG3PCI_SUBVENDOR_ID_DELL,
15103 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15104 { TG3PCI_SUBVENDOR_ID_DELL,
15105 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15107 /* Compaq boards. */
15108 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15109 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15110 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15111 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15112 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15113 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15114 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15115 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15116 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15117 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15120 { TG3PCI_SUBVENDOR_ID_IBM,
15121 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15124 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15128 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15129 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15130 tp->pdev->subsystem_vendor) &&
15131 (subsys_id_to_phy_id[i].subsys_devid ==
15132 tp->pdev->subsystem_device))
15133 return &subsys_id_to_phy_id[i];
15138 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15142 tp->phy_id = TG3_PHY_ID_INVALID;
15143 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15145 /* Assume an onboard device and WOL capable by default. */
15146 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15147 tg3_flag_set(tp, WOL_CAP);
15149 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15150 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15151 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15152 tg3_flag_set(tp, IS_NIC);
15154 val = tr32(VCPU_CFGSHDW);
15155 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15156 tg3_flag_set(tp, ASPM_WORKAROUND);
15157 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15158 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15159 tg3_flag_set(tp, WOL_ENABLE);
15160 device_set_wakeup_enable(&tp->pdev->dev, true);
15165 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15166 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15167 u32 nic_cfg, led_cfg;
15168 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15169 u32 nic_phy_id, ver, eeprom_phy_id;
15170 int eeprom_phy_serdes = 0;
15172 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15173 tp->nic_sram_data_cfg = nic_cfg;
15175 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15176 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15177 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15178 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15179 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15180 (ver > 0) && (ver < 0x100))
15181 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15183 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15184 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15186 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15187 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15188 tg3_asic_rev(tp) == ASIC_REV_5720)
15189 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15191 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15192 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15193 eeprom_phy_serdes = 1;
15195 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15196 if (nic_phy_id != 0) {
15197 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15198 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15200 eeprom_phy_id = (id1 >> 16) << 10;
15201 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15202 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15206 tp->phy_id = eeprom_phy_id;
15207 if (eeprom_phy_serdes) {
15208 if (!tg3_flag(tp, 5705_PLUS))
15209 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15211 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15214 if (tg3_flag(tp, 5750_PLUS))
15215 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15216 SHASTA_EXT_LED_MODE_MASK);
15218 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15222 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15223 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15226 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15227 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15230 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15231 tp->led_ctrl = LED_CTRL_MODE_MAC;
15233 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15234 * read on some older 5700/5701 bootcode.
15236 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15237 tg3_asic_rev(tp) == ASIC_REV_5701)
15238 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15242 case SHASTA_EXT_LED_SHARED:
15243 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15244 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15245 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15246 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15247 LED_CTRL_MODE_PHY_2);
15249 if (tg3_flag(tp, 5717_PLUS) ||
15250 tg3_asic_rev(tp) == ASIC_REV_5762)
15251 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15252 LED_CTRL_BLINK_RATE_MASK;
15256 case SHASTA_EXT_LED_MAC:
15257 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15260 case SHASTA_EXT_LED_COMBO:
15261 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15262 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15263 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15264 LED_CTRL_MODE_PHY_2);
15269 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15270 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15271 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15272 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15274 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15275 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15277 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15278 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15279 if ((tp->pdev->subsystem_vendor ==
15280 PCI_VENDOR_ID_ARIMA) &&
15281 (tp->pdev->subsystem_device == 0x205a ||
15282 tp->pdev->subsystem_device == 0x2063))
15283 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15285 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15286 tg3_flag_set(tp, IS_NIC);
15289 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15290 tg3_flag_set(tp, ENABLE_ASF);
15291 if (tg3_flag(tp, 5750_PLUS))
15292 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15295 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15296 tg3_flag(tp, 5750_PLUS))
15297 tg3_flag_set(tp, ENABLE_APE);
15299 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15300 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15301 tg3_flag_clear(tp, WOL_CAP);
15303 if (tg3_flag(tp, WOL_CAP) &&
15304 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15305 tg3_flag_set(tp, WOL_ENABLE);
15306 device_set_wakeup_enable(&tp->pdev->dev, true);
15309 if (cfg2 & (1 << 17))
15310 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15312 /* serdes signal pre-emphasis in register 0x590 set by */
15313 /* bootcode if bit 18 is set */
15314 if (cfg2 & (1 << 18))
15315 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15317 if ((tg3_flag(tp, 57765_PLUS) ||
15318 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15319 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15320 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15321 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15323 if (tg3_flag(tp, PCI_EXPRESS)) {
15326 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15327 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15328 !tg3_flag(tp, 57765_PLUS) &&
15329 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15330 tg3_flag_set(tp, ASPM_WORKAROUND);
15331 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15332 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15333 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15334 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15337 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15338 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15339 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15340 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15341 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15342 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15344 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15345 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15348 if (tg3_flag(tp, WOL_CAP))
15349 device_set_wakeup_enable(&tp->pdev->dev,
15350 tg3_flag(tp, WOL_ENABLE));
15352 device_set_wakeup_capable(&tp->pdev->dev, false);
15355 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15358 u32 val2, off = offset * 8;
15360 err = tg3_nvram_lock(tp);
15364 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15365 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15366 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15367 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15370 for (i = 0; i < 100; i++) {
15371 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15372 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15373 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15379 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15381 tg3_nvram_unlock(tp);
15382 if (val2 & APE_OTP_STATUS_CMD_DONE)
15388 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15393 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15394 tw32(OTP_CTRL, cmd);
15396 /* Wait for up to 1 ms for command to execute. */
15397 for (i = 0; i < 100; i++) {
15398 val = tr32(OTP_STATUS);
15399 if (val & OTP_STATUS_CMD_DONE)
15404 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15407 /* Read the gphy configuration from the OTP region of the chip. The gphy
15408 * configuration is a 32-bit value that straddles the alignment boundary.
15409 * We do two 32-bit reads and then shift and merge the results.
15411 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15413 u32 bhalf_otp, thalf_otp;
15415 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15417 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15420 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15422 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15425 thalf_otp = tr32(OTP_READ_DATA);
15427 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15429 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15432 bhalf_otp = tr32(OTP_READ_DATA);
15434 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15437 static void tg3_phy_init_link_config(struct tg3 *tp)
15439 u32 adv = ADVERTISED_Autoneg;
15441 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15442 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15443 adv |= ADVERTISED_1000baseT_Half;
15444 adv |= ADVERTISED_1000baseT_Full;
15447 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15448 adv |= ADVERTISED_100baseT_Half |
15449 ADVERTISED_100baseT_Full |
15450 ADVERTISED_10baseT_Half |
15451 ADVERTISED_10baseT_Full |
15454 adv |= ADVERTISED_FIBRE;
15456 tp->link_config.advertising = adv;
15457 tp->link_config.speed = SPEED_UNKNOWN;
15458 tp->link_config.duplex = DUPLEX_UNKNOWN;
15459 tp->link_config.autoneg = AUTONEG_ENABLE;
15460 tp->link_config.active_speed = SPEED_UNKNOWN;
15461 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15466 static int tg3_phy_probe(struct tg3 *tp)
15468 u32 hw_phy_id_1, hw_phy_id_2;
15469 u32 hw_phy_id, hw_phy_id_masked;
15472 /* flow control autonegotiation is default behavior */
15473 tg3_flag_set(tp, PAUSE_AUTONEG);
15474 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15476 if (tg3_flag(tp, ENABLE_APE)) {
15477 switch (tp->pci_fn) {
15479 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15482 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15485 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15488 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15493 if (!tg3_flag(tp, ENABLE_ASF) &&
15494 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15495 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15496 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15497 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15499 if (tg3_flag(tp, USE_PHYLIB))
15500 return tg3_phy_init(tp);
15502 /* Reading the PHY ID register can conflict with ASF
15503 * firmware access to the PHY hardware.
15506 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15507 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15509 /* Now read the physical PHY_ID from the chip and verify
15510 * that it is sane. If it doesn't look good, we fall back
15511 * to either the hard-coded table based PHY_ID and failing
15512 * that the value found in the eeprom area.
15514 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15515 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15517 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15518 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15519 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15521 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15524 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15525 tp->phy_id = hw_phy_id;
15526 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15527 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15529 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15531 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15532 /* Do nothing, phy ID already set up in
15533 * tg3_get_eeprom_hw_cfg().
15536 struct subsys_tbl_ent *p;
15538 /* No eeprom signature? Try the hardcoded
15539 * subsys device table.
15541 p = tg3_lookup_by_subsys(tp);
15543 tp->phy_id = p->phy_id;
15544 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15545 /* For now we saw the IDs 0xbc050cd0,
15546 * 0xbc050f80 and 0xbc050c30 on devices
15547 * connected to an BCM4785 and there are
15548 * probably more. Just assume that the phy is
15549 * supported when it is connected to a SSB core
15556 tp->phy_id == TG3_PHY_ID_BCM8002)
15557 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15561 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15562 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15563 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15564 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15565 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15566 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15567 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15568 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15569 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15570 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15572 tp->eee.supported = SUPPORTED_100baseT_Full |
15573 SUPPORTED_1000baseT_Full;
15574 tp->eee.advertised = ADVERTISED_100baseT_Full |
15575 ADVERTISED_1000baseT_Full;
15576 tp->eee.eee_enabled = 1;
15577 tp->eee.tx_lpi_enabled = 1;
15578 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15581 tg3_phy_init_link_config(tp);
15583 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15584 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15585 !tg3_flag(tp, ENABLE_APE) &&
15586 !tg3_flag(tp, ENABLE_ASF)) {
15589 tg3_readphy(tp, MII_BMSR, &bmsr);
15590 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15591 (bmsr & BMSR_LSTATUS))
15592 goto skip_phy_reset;
15594 err = tg3_phy_reset(tp);
15598 tg3_phy_set_wirespeed(tp);
15600 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15601 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15602 tp->link_config.flowctrl);
15604 tg3_writephy(tp, MII_BMCR,
15605 BMCR_ANENABLE | BMCR_ANRESTART);
15610 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15611 err = tg3_init_5401phy_dsp(tp);
15615 err = tg3_init_5401phy_dsp(tp);
15621 static void tg3_read_vpd(struct tg3 *tp)
15624 unsigned int block_end, rosize, len;
15628 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15632 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15634 goto out_not_found;
15636 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15637 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15638 i += PCI_VPD_LRDT_TAG_SIZE;
15640 if (block_end > vpdlen)
15641 goto out_not_found;
15643 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15644 PCI_VPD_RO_KEYWORD_MFR_ID);
15646 len = pci_vpd_info_field_size(&vpd_data[j]);
15648 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15649 if (j + len > block_end || len != 4 ||
15650 memcmp(&vpd_data[j], "1028", 4))
15653 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15654 PCI_VPD_RO_KEYWORD_VENDOR0);
15658 len = pci_vpd_info_field_size(&vpd_data[j]);
15660 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15661 if (j + len > block_end)
15664 if (len >= sizeof(tp->fw_ver))
15665 len = sizeof(tp->fw_ver) - 1;
15666 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15667 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15672 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15673 PCI_VPD_RO_KEYWORD_PARTNO);
15675 goto out_not_found;
15677 len = pci_vpd_info_field_size(&vpd_data[i]);
15679 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15680 if (len > TG3_BPN_SIZE ||
15681 (len + i) > vpdlen)
15682 goto out_not_found;
15684 memcpy(tp->board_part_number, &vpd_data[i], len);
15688 if (tp->board_part_number[0])
15692 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15693 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15694 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15695 strcpy(tp->board_part_number, "BCM5717");
15696 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15697 strcpy(tp->board_part_number, "BCM5718");
15700 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15701 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15702 strcpy(tp->board_part_number, "BCM57780");
15703 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15704 strcpy(tp->board_part_number, "BCM57760");
15705 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15706 strcpy(tp->board_part_number, "BCM57790");
15707 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15708 strcpy(tp->board_part_number, "BCM57788");
15711 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15712 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15713 strcpy(tp->board_part_number, "BCM57761");
15714 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15715 strcpy(tp->board_part_number, "BCM57765");
15716 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15717 strcpy(tp->board_part_number, "BCM57781");
15718 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15719 strcpy(tp->board_part_number, "BCM57785");
15720 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15721 strcpy(tp->board_part_number, "BCM57791");
15722 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15723 strcpy(tp->board_part_number, "BCM57795");
15726 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15727 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15728 strcpy(tp->board_part_number, "BCM57762");
15729 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15730 strcpy(tp->board_part_number, "BCM57766");
15731 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15732 strcpy(tp->board_part_number, "BCM57782");
15733 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15734 strcpy(tp->board_part_number, "BCM57786");
15737 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15738 strcpy(tp->board_part_number, "BCM95906");
15741 strcpy(tp->board_part_number, "none");
15745 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15749 if (tg3_nvram_read(tp, offset, &val) ||
15750 (val & 0xfc000000) != 0x0c000000 ||
15751 tg3_nvram_read(tp, offset + 4, &val) ||
15758 static void tg3_read_bc_ver(struct tg3 *tp)
15760 u32 val, offset, start, ver_offset;
15762 bool newver = false;
15764 if (tg3_nvram_read(tp, 0xc, &offset) ||
15765 tg3_nvram_read(tp, 0x4, &start))
15768 offset = tg3_nvram_logical_addr(tp, offset);
15770 if (tg3_nvram_read(tp, offset, &val))
15773 if ((val & 0xfc000000) == 0x0c000000) {
15774 if (tg3_nvram_read(tp, offset + 4, &val))
15781 dst_off = strlen(tp->fw_ver);
15784 if (TG3_VER_SIZE - dst_off < 16 ||
15785 tg3_nvram_read(tp, offset + 8, &ver_offset))
15788 offset = offset + ver_offset - start;
15789 for (i = 0; i < 16; i += 4) {
15791 if (tg3_nvram_read_be32(tp, offset + i, &v))
15794 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15799 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15802 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15803 TG3_NVM_BCVER_MAJSFT;
15804 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15805 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15806 "v%d.%02d", major, minor);
15810 static void tg3_read_hwsb_ver(struct tg3 *tp)
15812 u32 val, major, minor;
15814 /* Use native endian representation */
15815 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15818 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15819 TG3_NVM_HWSB_CFG1_MAJSFT;
15820 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15821 TG3_NVM_HWSB_CFG1_MINSFT;
15823 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15826 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15828 u32 offset, major, minor, build;
15830 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15832 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15835 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15836 case TG3_EEPROM_SB_REVISION_0:
15837 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15839 case TG3_EEPROM_SB_REVISION_2:
15840 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15842 case TG3_EEPROM_SB_REVISION_3:
15843 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15845 case TG3_EEPROM_SB_REVISION_4:
15846 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15848 case TG3_EEPROM_SB_REVISION_5:
15849 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15851 case TG3_EEPROM_SB_REVISION_6:
15852 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15858 if (tg3_nvram_read(tp, offset, &val))
15861 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15862 TG3_EEPROM_SB_EDH_BLD_SHFT;
15863 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15864 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15865 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15867 if (minor > 99 || build > 26)
15870 offset = strlen(tp->fw_ver);
15871 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15872 " v%d.%02d", major, minor);
15875 offset = strlen(tp->fw_ver);
15876 if (offset < TG3_VER_SIZE - 1)
15877 tp->fw_ver[offset] = 'a' + build - 1;
15881 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15883 u32 val, offset, start;
15886 for (offset = TG3_NVM_DIR_START;
15887 offset < TG3_NVM_DIR_END;
15888 offset += TG3_NVM_DIRENT_SIZE) {
15889 if (tg3_nvram_read(tp, offset, &val))
15892 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15896 if (offset == TG3_NVM_DIR_END)
15899 if (!tg3_flag(tp, 5705_PLUS))
15900 start = 0x08000000;
15901 else if (tg3_nvram_read(tp, offset - 4, &start))
15904 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15905 !tg3_fw_img_is_valid(tp, offset) ||
15906 tg3_nvram_read(tp, offset + 8, &val))
15909 offset += val - start;
15911 vlen = strlen(tp->fw_ver);
15913 tp->fw_ver[vlen++] = ',';
15914 tp->fw_ver[vlen++] = ' ';
15916 for (i = 0; i < 4; i++) {
15918 if (tg3_nvram_read_be32(tp, offset, &v))
15921 offset += sizeof(v);
15923 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15924 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15928 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15933 static void tg3_probe_ncsi(struct tg3 *tp)
15937 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15938 if (apedata != APE_SEG_SIG_MAGIC)
15941 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15942 if (!(apedata & APE_FW_STATUS_READY))
15945 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15946 tg3_flag_set(tp, APE_HAS_NCSI);
15949 static void tg3_read_dash_ver(struct tg3 *tp)
15955 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15957 if (tg3_flag(tp, APE_HAS_NCSI))
15959 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15964 vlen = strlen(tp->fw_ver);
15966 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15968 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15969 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15970 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15971 (apedata & APE_FW_VERSION_BLDMSK));
15974 static void tg3_read_otp_ver(struct tg3 *tp)
15978 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15981 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15982 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15983 TG3_OTP_MAGIC0_VALID(val)) {
15984 u64 val64 = (u64) val << 32 | val2;
15988 for (i = 0; i < 7; i++) {
15989 if ((val64 & 0xff) == 0)
15991 ver = val64 & 0xff;
15994 vlen = strlen(tp->fw_ver);
15995 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15999 static void tg3_read_fw_ver(struct tg3 *tp)
16002 bool vpd_vers = false;
16004 if (tp->fw_ver[0] != 0)
16007 if (tg3_flag(tp, NO_NVRAM)) {
16008 strcat(tp->fw_ver, "sb");
16009 tg3_read_otp_ver(tp);
16013 if (tg3_nvram_read(tp, 0, &val))
16016 if (val == TG3_EEPROM_MAGIC)
16017 tg3_read_bc_ver(tp);
16018 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16019 tg3_read_sb_ver(tp, val);
16020 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16021 tg3_read_hwsb_ver(tp);
16023 if (tg3_flag(tp, ENABLE_ASF)) {
16024 if (tg3_flag(tp, ENABLE_APE)) {
16025 tg3_probe_ncsi(tp);
16027 tg3_read_dash_ver(tp);
16028 } else if (!vpd_vers) {
16029 tg3_read_mgmtfw_ver(tp);
16033 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16036 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16038 if (tg3_flag(tp, LRG_PROD_RING_CAP))
16039 return TG3_RX_RET_MAX_SIZE_5717;
16040 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16041 return TG3_RX_RET_MAX_SIZE_5700;
16043 return TG3_RX_RET_MAX_SIZE_5705;
16046 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16047 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16048 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16049 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16053 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16055 struct pci_dev *peer;
16056 unsigned int func, devnr = tp->pdev->devfn & ~7;
16058 for (func = 0; func < 8; func++) {
16059 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16060 if (peer && peer != tp->pdev)
16064 /* 5704 can be configured in single-port mode, set peer to
16065 * tp->pdev in that case.
16073 * We don't need to keep the refcount elevated; there's no way
16074 * to remove one half of this device without removing the other
16081 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16083 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16084 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16087 /* All devices that use the alternate
16088 * ASIC REV location have a CPMU.
16090 tg3_flag_set(tp, CPMU_PRESENT);
16092 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16093 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16094 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16095 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16096 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16097 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16098 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16099 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16100 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16101 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16102 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16103 reg = TG3PCI_GEN2_PRODID_ASICREV;
16104 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16105 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16106 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16107 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16108 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16109 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16110 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16111 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16112 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16113 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16114 reg = TG3PCI_GEN15_PRODID_ASICREV;
16116 reg = TG3PCI_PRODID_ASICREV;
16118 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16121 /* Wrong chip ID in 5752 A0. This code can be removed later
16122 * as A0 is not in production.
16124 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16125 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16127 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16128 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16130 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16131 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16132 tg3_asic_rev(tp) == ASIC_REV_5720)
16133 tg3_flag_set(tp, 5717_PLUS);
16135 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16136 tg3_asic_rev(tp) == ASIC_REV_57766)
16137 tg3_flag_set(tp, 57765_CLASS);
16139 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16140 tg3_asic_rev(tp) == ASIC_REV_5762)
16141 tg3_flag_set(tp, 57765_PLUS);
16143 /* Intentionally exclude ASIC_REV_5906 */
16144 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16145 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16146 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16147 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16148 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16149 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16150 tg3_flag(tp, 57765_PLUS))
16151 tg3_flag_set(tp, 5755_PLUS);
16153 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16154 tg3_asic_rev(tp) == ASIC_REV_5714)
16155 tg3_flag_set(tp, 5780_CLASS);
16157 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16158 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16159 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16160 tg3_flag(tp, 5755_PLUS) ||
16161 tg3_flag(tp, 5780_CLASS))
16162 tg3_flag_set(tp, 5750_PLUS);
16164 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16165 tg3_flag(tp, 5750_PLUS))
16166 tg3_flag_set(tp, 5705_PLUS);
16169 static bool tg3_10_100_only_device(struct tg3 *tp,
16170 const struct pci_device_id *ent)
16172 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16174 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16175 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16176 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16179 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16180 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16181 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16191 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16194 u32 pci_state_reg, grc_misc_cfg;
16199 /* Force memory write invalidate off. If we leave it on,
16200 * then on 5700_BX chips we have to enable a workaround.
16201 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16202 * to match the cacheline size. The Broadcom driver have this
16203 * workaround but turns MWI off all the times so never uses
16204 * it. This seems to suggest that the workaround is insufficient.
16206 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16207 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16208 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16210 /* Important! -- Make sure register accesses are byteswapped
16211 * correctly. Also, for those chips that require it, make
16212 * sure that indirect register accesses are enabled before
16213 * the first operation.
16215 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16217 tp->misc_host_ctrl |= (misc_ctrl_reg &
16218 MISC_HOST_CTRL_CHIPREV);
16219 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16220 tp->misc_host_ctrl);
16222 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16224 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16225 * we need to disable memory and use config. cycles
16226 * only to access all registers. The 5702/03 chips
16227 * can mistakenly decode the special cycles from the
16228 * ICH chipsets as memory write cycles, causing corruption
16229 * of register and memory space. Only certain ICH bridges
16230 * will drive special cycles with non-zero data during the
16231 * address phase which can fall within the 5703's address
16232 * range. This is not an ICH bug as the PCI spec allows
16233 * non-zero address during special cycles. However, only
16234 * these ICH bridges are known to drive non-zero addresses
16235 * during special cycles.
16237 * Since special cycles do not cross PCI bridges, we only
16238 * enable this workaround if the 5703 is on the secondary
16239 * bus of these ICH bridges.
16241 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16242 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16243 static struct tg3_dev_id {
16247 } ich_chipsets[] = {
16248 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16250 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16252 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16254 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16258 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16259 struct pci_dev *bridge = NULL;
16261 while (pci_id->vendor != 0) {
16262 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16268 if (pci_id->rev != PCI_ANY_ID) {
16269 if (bridge->revision > pci_id->rev)
16272 if (bridge->subordinate &&
16273 (bridge->subordinate->number ==
16274 tp->pdev->bus->number)) {
16275 tg3_flag_set(tp, ICH_WORKAROUND);
16276 pci_dev_put(bridge);
16282 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16283 static struct tg3_dev_id {
16286 } bridge_chipsets[] = {
16287 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16288 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16291 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16292 struct pci_dev *bridge = NULL;
16294 while (pci_id->vendor != 0) {
16295 bridge = pci_get_device(pci_id->vendor,
16302 if (bridge->subordinate &&
16303 (bridge->subordinate->number <=
16304 tp->pdev->bus->number) &&
16305 (bridge->subordinate->busn_res.end >=
16306 tp->pdev->bus->number)) {
16307 tg3_flag_set(tp, 5701_DMA_BUG);
16308 pci_dev_put(bridge);
16314 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16315 * DMA addresses > 40-bit. This bridge may have other additional
16316 * 57xx devices behind it in some 4-port NIC designs for example.
16317 * Any tg3 device found behind the bridge will also need the 40-bit
16320 if (tg3_flag(tp, 5780_CLASS)) {
16321 tg3_flag_set(tp, 40BIT_DMA_BUG);
16322 tp->msi_cap = tp->pdev->msi_cap;
16324 struct pci_dev *bridge = NULL;
16327 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16328 PCI_DEVICE_ID_SERVERWORKS_EPB,
16330 if (bridge && bridge->subordinate &&
16331 (bridge->subordinate->number <=
16332 tp->pdev->bus->number) &&
16333 (bridge->subordinate->busn_res.end >=
16334 tp->pdev->bus->number)) {
16335 tg3_flag_set(tp, 40BIT_DMA_BUG);
16336 pci_dev_put(bridge);
16342 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16343 tg3_asic_rev(tp) == ASIC_REV_5714)
16344 tp->pdev_peer = tg3_find_peer(tp);
16346 /* Determine TSO capabilities */
16347 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16348 ; /* Do nothing. HW bug. */
16349 else if (tg3_flag(tp, 57765_PLUS))
16350 tg3_flag_set(tp, HW_TSO_3);
16351 else if (tg3_flag(tp, 5755_PLUS) ||
16352 tg3_asic_rev(tp) == ASIC_REV_5906)
16353 tg3_flag_set(tp, HW_TSO_2);
16354 else if (tg3_flag(tp, 5750_PLUS)) {
16355 tg3_flag_set(tp, HW_TSO_1);
16356 tg3_flag_set(tp, TSO_BUG);
16357 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16358 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16359 tg3_flag_clear(tp, TSO_BUG);
16360 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16361 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16362 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16363 tg3_flag_set(tp, FW_TSO);
16364 tg3_flag_set(tp, TSO_BUG);
16365 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16366 tp->fw_needed = FIRMWARE_TG3TSO5;
16368 tp->fw_needed = FIRMWARE_TG3TSO;
16371 /* Selectively allow TSO based on operating conditions */
16372 if (tg3_flag(tp, HW_TSO_1) ||
16373 tg3_flag(tp, HW_TSO_2) ||
16374 tg3_flag(tp, HW_TSO_3) ||
16375 tg3_flag(tp, FW_TSO)) {
16376 /* For firmware TSO, assume ASF is disabled.
16377 * We'll disable TSO later if we discover ASF
16378 * is enabled in tg3_get_eeprom_hw_cfg().
16380 tg3_flag_set(tp, TSO_CAPABLE);
16382 tg3_flag_clear(tp, TSO_CAPABLE);
16383 tg3_flag_clear(tp, TSO_BUG);
16384 tp->fw_needed = NULL;
16387 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16388 tp->fw_needed = FIRMWARE_TG3;
16390 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16391 tp->fw_needed = FIRMWARE_TG357766;
16395 if (tg3_flag(tp, 5750_PLUS)) {
16396 tg3_flag_set(tp, SUPPORT_MSI);
16397 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16398 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16399 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16400 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16401 tp->pdev_peer == tp->pdev))
16402 tg3_flag_clear(tp, SUPPORT_MSI);
16404 if (tg3_flag(tp, 5755_PLUS) ||
16405 tg3_asic_rev(tp) == ASIC_REV_5906) {
16406 tg3_flag_set(tp, 1SHOT_MSI);
16409 if (tg3_flag(tp, 57765_PLUS)) {
16410 tg3_flag_set(tp, SUPPORT_MSIX);
16411 tp->irq_max = TG3_IRQ_MAX_VECS;
16417 if (tp->irq_max > 1) {
16418 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16419 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16421 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16422 tg3_asic_rev(tp) == ASIC_REV_5720)
16423 tp->txq_max = tp->irq_max - 1;
16426 if (tg3_flag(tp, 5755_PLUS) ||
16427 tg3_asic_rev(tp) == ASIC_REV_5906)
16428 tg3_flag_set(tp, SHORT_DMA_BUG);
16430 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16431 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16433 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16434 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16435 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16436 tg3_asic_rev(tp) == ASIC_REV_5762)
16437 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16439 if (tg3_flag(tp, 57765_PLUS) &&
16440 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16441 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16443 if (!tg3_flag(tp, 5705_PLUS) ||
16444 tg3_flag(tp, 5780_CLASS) ||
16445 tg3_flag(tp, USE_JUMBO_BDFLAG))
16446 tg3_flag_set(tp, JUMBO_CAPABLE);
16448 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16451 if (pci_is_pcie(tp->pdev)) {
16454 tg3_flag_set(tp, PCI_EXPRESS);
16456 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16457 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16458 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16459 tg3_flag_clear(tp, HW_TSO_2);
16460 tg3_flag_clear(tp, TSO_CAPABLE);
16462 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16463 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16464 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16465 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16466 tg3_flag_set(tp, CLKREQ_BUG);
16467 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16468 tg3_flag_set(tp, L1PLLPD_EN);
16470 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16471 /* BCM5785 devices are effectively PCIe devices, and should
16472 * follow PCIe codepaths, but do not have a PCIe capabilities
16475 tg3_flag_set(tp, PCI_EXPRESS);
16476 } else if (!tg3_flag(tp, 5705_PLUS) ||
16477 tg3_flag(tp, 5780_CLASS)) {
16478 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16479 if (!tp->pcix_cap) {
16480 dev_err(&tp->pdev->dev,
16481 "Cannot find PCI-X capability, aborting\n");
16485 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16486 tg3_flag_set(tp, PCIX_MODE);
16489 /* If we have an AMD 762 or VIA K8T800 chipset, write
16490 * reordering to the mailbox registers done by the host
16491 * controller can cause major troubles. We read back from
16492 * every mailbox register write to force the writes to be
16493 * posted to the chip in order.
16495 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16496 !tg3_flag(tp, PCI_EXPRESS))
16497 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16499 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16500 &tp->pci_cacheline_sz);
16501 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16502 &tp->pci_lat_timer);
16503 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16504 tp->pci_lat_timer < 64) {
16505 tp->pci_lat_timer = 64;
16506 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16507 tp->pci_lat_timer);
16510 /* Important! -- It is critical that the PCI-X hw workaround
16511 * situation is decided before the first MMIO register access.
16513 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16514 /* 5700 BX chips need to have their TX producer index
16515 * mailboxes written twice to workaround a bug.
16517 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16519 /* If we are in PCI-X mode, enable register write workaround.
16521 * The workaround is to use indirect register accesses
16522 * for all chip writes not to mailbox registers.
16524 if (tg3_flag(tp, PCIX_MODE)) {
16527 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16529 /* The chip can have it's power management PCI config
16530 * space registers clobbered due to this bug.
16531 * So explicitly force the chip into D0 here.
16533 pci_read_config_dword(tp->pdev,
16534 tp->pdev->pm_cap + PCI_PM_CTRL,
16536 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16537 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16538 pci_write_config_dword(tp->pdev,
16539 tp->pdev->pm_cap + PCI_PM_CTRL,
16542 /* Also, force SERR#/PERR# in PCI command. */
16543 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16544 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16545 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16549 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16550 tg3_flag_set(tp, PCI_HIGH_SPEED);
16551 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16552 tg3_flag_set(tp, PCI_32BIT);
16554 /* Chip-specific fixup from Broadcom driver */
16555 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16556 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16557 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16558 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16561 /* Default fast path register access methods */
16562 tp->read32 = tg3_read32;
16563 tp->write32 = tg3_write32;
16564 tp->read32_mbox = tg3_read32;
16565 tp->write32_mbox = tg3_write32;
16566 tp->write32_tx_mbox = tg3_write32;
16567 tp->write32_rx_mbox = tg3_write32;
16569 /* Various workaround register access methods */
16570 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16571 tp->write32 = tg3_write_indirect_reg32;
16572 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16573 (tg3_flag(tp, PCI_EXPRESS) &&
16574 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16576 * Back to back register writes can cause problems on these
16577 * chips, the workaround is to read back all reg writes
16578 * except those to mailbox regs.
16580 * See tg3_write_indirect_reg32().
16582 tp->write32 = tg3_write_flush_reg32;
16585 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16586 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16587 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16588 tp->write32_rx_mbox = tg3_write_flush_reg32;
16591 if (tg3_flag(tp, ICH_WORKAROUND)) {
16592 tp->read32 = tg3_read_indirect_reg32;
16593 tp->write32 = tg3_write_indirect_reg32;
16594 tp->read32_mbox = tg3_read_indirect_mbox;
16595 tp->write32_mbox = tg3_write_indirect_mbox;
16596 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16597 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16602 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16603 pci_cmd &= ~PCI_COMMAND_MEMORY;
16604 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16606 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16607 tp->read32_mbox = tg3_read32_mbox_5906;
16608 tp->write32_mbox = tg3_write32_mbox_5906;
16609 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16610 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16613 if (tp->write32 == tg3_write_indirect_reg32 ||
16614 (tg3_flag(tp, PCIX_MODE) &&
16615 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16616 tg3_asic_rev(tp) == ASIC_REV_5701)))
16617 tg3_flag_set(tp, SRAM_USE_CONFIG);
16619 /* The memory arbiter has to be enabled in order for SRAM accesses
16620 * to succeed. Normally on powerup the tg3 chip firmware will make
16621 * sure it is enabled, but other entities such as system netboot
16622 * code might disable it.
16624 val = tr32(MEMARB_MODE);
16625 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16627 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16628 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16629 tg3_flag(tp, 5780_CLASS)) {
16630 if (tg3_flag(tp, PCIX_MODE)) {
16631 pci_read_config_dword(tp->pdev,
16632 tp->pcix_cap + PCI_X_STATUS,
16634 tp->pci_fn = val & 0x7;
16636 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16637 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16638 tg3_asic_rev(tp) == ASIC_REV_5720) {
16639 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16640 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16641 val = tr32(TG3_CPMU_STATUS);
16643 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16644 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16646 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16647 TG3_CPMU_STATUS_FSHFT_5719;
16650 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16651 tp->write32_tx_mbox = tg3_write_flush_reg32;
16652 tp->write32_rx_mbox = tg3_write_flush_reg32;
16655 /* Get eeprom hw config before calling tg3_set_power_state().
16656 * In particular, the TG3_FLAG_IS_NIC flag must be
16657 * determined before calling tg3_set_power_state() so that
16658 * we know whether or not to switch out of Vaux power.
16659 * When the flag is set, it means that GPIO1 is used for eeprom
16660 * write protect and also implies that it is a LOM where GPIOs
16661 * are not used to switch power.
16663 tg3_get_eeprom_hw_cfg(tp);
16665 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16666 tg3_flag_clear(tp, TSO_CAPABLE);
16667 tg3_flag_clear(tp, TSO_BUG);
16668 tp->fw_needed = NULL;
16671 if (tg3_flag(tp, ENABLE_APE)) {
16672 /* Allow reads and writes to the
16673 * APE register and memory space.
16675 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16676 PCISTATE_ALLOW_APE_SHMEM_WR |
16677 PCISTATE_ALLOW_APE_PSPACE_WR;
16678 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16681 tg3_ape_lock_init(tp);
16682 tp->ape_hb_interval =
16683 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16686 /* Set up tp->grc_local_ctrl before calling
16687 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16688 * will bring 5700's external PHY out of reset.
16689 * It is also used as eeprom write protect on LOMs.
16691 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16692 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16693 tg3_flag(tp, EEPROM_WRITE_PROT))
16694 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16695 GRC_LCLCTRL_GPIO_OUTPUT1);
16696 /* Unused GPIO3 must be driven as output on 5752 because there
16697 * are no pull-up resistors on unused GPIO pins.
16699 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16700 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16702 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16703 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16704 tg3_flag(tp, 57765_CLASS))
16705 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16707 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16708 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16709 /* Turn off the debug UART. */
16710 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16711 if (tg3_flag(tp, IS_NIC))
16712 /* Keep VMain power. */
16713 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16714 GRC_LCLCTRL_GPIO_OUTPUT0;
16717 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16718 tp->grc_local_ctrl |=
16719 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16721 /* Switch out of Vaux if it is a NIC */
16722 tg3_pwrsrc_switch_to_vmain(tp);
16724 /* Derive initial jumbo mode from MTU assigned in
16725 * ether_setup() via the alloc_etherdev() call
16727 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16728 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16730 /* Determine WakeOnLan speed to use. */
16731 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16732 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16733 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16734 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16735 tg3_flag_clear(tp, WOL_SPEED_100MB);
16737 tg3_flag_set(tp, WOL_SPEED_100MB);
16740 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16741 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16743 /* A few boards don't want Ethernet@WireSpeed phy feature */
16744 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16745 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16746 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16747 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16748 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16749 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16750 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16752 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16753 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16754 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16755 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16756 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16758 if (tg3_flag(tp, 5705_PLUS) &&
16759 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16760 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16761 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16762 !tg3_flag(tp, 57765_PLUS)) {
16763 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16764 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16765 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16766 tg3_asic_rev(tp) == ASIC_REV_5761) {
16767 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16768 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16769 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16770 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16771 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16773 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16776 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16777 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16778 tp->phy_otp = tg3_read_otp_phycfg(tp);
16779 if (tp->phy_otp == 0)
16780 tp->phy_otp = TG3_OTP_DEFAULT;
16783 if (tg3_flag(tp, CPMU_PRESENT))
16784 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16786 tp->mi_mode = MAC_MI_MODE_BASE;
16788 tp->coalesce_mode = 0;
16789 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16790 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16791 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16793 /* Set these bits to enable statistics workaround. */
16794 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16795 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16796 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16797 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16798 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16799 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16802 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16803 tg3_asic_rev(tp) == ASIC_REV_57780)
16804 tg3_flag_set(tp, USE_PHYLIB);
16806 err = tg3_mdio_init(tp);
16810 /* Initialize data/descriptor byte/word swapping. */
16811 val = tr32(GRC_MODE);
16812 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16813 tg3_asic_rev(tp) == ASIC_REV_5762)
16814 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16815 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16816 GRC_MODE_B2HRX_ENABLE |
16817 GRC_MODE_HTX2B_ENABLE |
16818 GRC_MODE_HOST_STACKUP);
16820 val &= GRC_MODE_HOST_STACKUP;
16822 tw32(GRC_MODE, val | tp->grc_mode);
16824 tg3_switch_clocks(tp);
16826 /* Clear this out for sanity. */
16827 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16829 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16830 tw32(TG3PCI_REG_BASE_ADDR, 0);
16832 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16834 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16835 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16836 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16837 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16838 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16839 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16840 void __iomem *sram_base;
16842 /* Write some dummy words into the SRAM status block
16843 * area, see if it reads back correctly. If the return
16844 * value is bad, force enable the PCIX workaround.
16846 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16848 writel(0x00000000, sram_base);
16849 writel(0x00000000, sram_base + 4);
16850 writel(0xffffffff, sram_base + 4);
16851 if (readl(sram_base) != 0x00000000)
16852 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16857 tg3_nvram_init(tp);
16859 /* If the device has an NVRAM, no need to load patch firmware */
16860 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16861 !tg3_flag(tp, NO_NVRAM))
16862 tp->fw_needed = NULL;
16864 grc_misc_cfg = tr32(GRC_MISC_CFG);
16865 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16867 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16868 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16869 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16870 tg3_flag_set(tp, IS_5788);
16872 if (!tg3_flag(tp, IS_5788) &&
16873 tg3_asic_rev(tp) != ASIC_REV_5700)
16874 tg3_flag_set(tp, TAGGED_STATUS);
16875 if (tg3_flag(tp, TAGGED_STATUS)) {
16876 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16877 HOSTCC_MODE_CLRTICK_TXBD);
16879 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16880 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16881 tp->misc_host_ctrl);
16884 /* Preserve the APE MAC_MODE bits */
16885 if (tg3_flag(tp, ENABLE_APE))
16886 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16890 if (tg3_10_100_only_device(tp, ent))
16891 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16893 err = tg3_phy_probe(tp);
16895 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16896 /* ... but do not return immediately ... */
16901 tg3_read_fw_ver(tp);
16903 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16904 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16906 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16907 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16909 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16912 /* 5700 {AX,BX} chips have a broken status block link
16913 * change bit implementation, so we must use the
16914 * status register in those cases.
16916 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16917 tg3_flag_set(tp, USE_LINKCHG_REG);
16919 tg3_flag_clear(tp, USE_LINKCHG_REG);
16921 /* The led_ctrl is set during tg3_phy_probe, here we might
16922 * have to force the link status polling mechanism based
16923 * upon subsystem IDs.
16925 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16926 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16927 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16928 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16929 tg3_flag_set(tp, USE_LINKCHG_REG);
16932 /* For all SERDES we poll the MAC status register. */
16933 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16934 tg3_flag_set(tp, POLL_SERDES);
16936 tg3_flag_clear(tp, POLL_SERDES);
16938 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16939 tg3_flag_set(tp, POLL_CPMU_LINK);
16941 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16942 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16943 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16944 tg3_flag(tp, PCIX_MODE)) {
16945 tp->rx_offset = NET_SKB_PAD;
16946 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16947 tp->rx_copy_thresh = ~(u16)0;
16951 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16952 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16953 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16955 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16957 /* Increment the rx prod index on the rx std ring by at most
16958 * 8 for these chips to workaround hw errata.
16960 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16961 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16962 tg3_asic_rev(tp) == ASIC_REV_5755)
16963 tp->rx_std_max_post = 8;
16965 if (tg3_flag(tp, ASPM_WORKAROUND))
16966 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16967 PCIE_PWR_MGMT_L1_THRESH_MSK;
16972 #ifdef CONFIG_SPARC
16973 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16975 struct net_device *dev = tp->dev;
16976 struct pci_dev *pdev = tp->pdev;
16977 struct device_node *dp = pci_device_to_OF_node(pdev);
16978 const unsigned char *addr;
16981 addr = of_get_property(dp, "local-mac-address", &len);
16982 if (addr && len == ETH_ALEN) {
16983 memcpy(dev->dev_addr, addr, ETH_ALEN);
16989 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16991 struct net_device *dev = tp->dev;
16993 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16998 static int tg3_get_device_address(struct tg3 *tp)
17000 struct net_device *dev = tp->dev;
17001 u32 hi, lo, mac_offset;
17005 #ifdef CONFIG_SPARC
17006 if (!tg3_get_macaddr_sparc(tp))
17010 if (tg3_flag(tp, IS_SSB_CORE)) {
17011 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
17012 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
17017 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17018 tg3_flag(tp, 5780_CLASS)) {
17019 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17021 if (tg3_nvram_lock(tp))
17022 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17024 tg3_nvram_unlock(tp);
17025 } else if (tg3_flag(tp, 5717_PLUS)) {
17026 if (tp->pci_fn & 1)
17028 if (tp->pci_fn > 1)
17029 mac_offset += 0x18c;
17030 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17033 /* First try to get it from MAC address mailbox. */
17034 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
17035 if ((hi >> 16) == 0x484b) {
17036 dev->dev_addr[0] = (hi >> 8) & 0xff;
17037 dev->dev_addr[1] = (hi >> 0) & 0xff;
17039 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17040 dev->dev_addr[2] = (lo >> 24) & 0xff;
17041 dev->dev_addr[3] = (lo >> 16) & 0xff;
17042 dev->dev_addr[4] = (lo >> 8) & 0xff;
17043 dev->dev_addr[5] = (lo >> 0) & 0xff;
17045 /* Some old bootcode may report a 0 MAC address in SRAM */
17046 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17049 /* Next, try NVRAM. */
17050 if (!tg3_flag(tp, NO_NVRAM) &&
17051 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17052 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17053 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17054 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17056 /* Finally just fetch it out of the MAC control regs. */
17058 hi = tr32(MAC_ADDR_0_HIGH);
17059 lo = tr32(MAC_ADDR_0_LOW);
17061 dev->dev_addr[5] = lo & 0xff;
17062 dev->dev_addr[4] = (lo >> 8) & 0xff;
17063 dev->dev_addr[3] = (lo >> 16) & 0xff;
17064 dev->dev_addr[2] = (lo >> 24) & 0xff;
17065 dev->dev_addr[1] = hi & 0xff;
17066 dev->dev_addr[0] = (hi >> 8) & 0xff;
17070 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17071 #ifdef CONFIG_SPARC
17072 if (!tg3_get_default_macaddr_sparc(tp))
17080 #define BOUNDARY_SINGLE_CACHELINE 1
17081 #define BOUNDARY_MULTI_CACHELINE 2
17083 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17085 int cacheline_size;
17089 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17091 cacheline_size = 1024;
17093 cacheline_size = (int) byte * 4;
17095 /* On 5703 and later chips, the boundary bits have no
17098 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17099 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17100 !tg3_flag(tp, PCI_EXPRESS))
17103 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17104 goal = BOUNDARY_MULTI_CACHELINE;
17106 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17107 goal = BOUNDARY_SINGLE_CACHELINE;
17113 if (tg3_flag(tp, 57765_PLUS)) {
17114 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17121 /* PCI controllers on most RISC systems tend to disconnect
17122 * when a device tries to burst across a cache-line boundary.
17123 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17125 * Unfortunately, for PCI-E there are only limited
17126 * write-side controls for this, and thus for reads
17127 * we will still get the disconnects. We'll also waste
17128 * these PCI cycles for both read and write for chips
17129 * other than 5700 and 5701 which do not implement the
17132 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17133 switch (cacheline_size) {
17138 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17139 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17140 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17142 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17143 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17148 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17149 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17153 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17154 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17157 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17158 switch (cacheline_size) {
17162 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17163 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17164 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17170 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17171 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17175 switch (cacheline_size) {
17177 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17178 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17179 DMA_RWCTRL_WRITE_BNDRY_16);
17184 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17185 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17186 DMA_RWCTRL_WRITE_BNDRY_32);
17191 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17192 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17193 DMA_RWCTRL_WRITE_BNDRY_64);
17198 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17199 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17200 DMA_RWCTRL_WRITE_BNDRY_128);
17205 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17206 DMA_RWCTRL_WRITE_BNDRY_256);
17209 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17210 DMA_RWCTRL_WRITE_BNDRY_512);
17214 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17215 DMA_RWCTRL_WRITE_BNDRY_1024);
17224 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17225 int size, bool to_device)
17227 struct tg3_internal_buffer_desc test_desc;
17228 u32 sram_dma_descs;
17231 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17233 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17234 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17235 tw32(RDMAC_STATUS, 0);
17236 tw32(WDMAC_STATUS, 0);
17238 tw32(BUFMGR_MODE, 0);
17239 tw32(FTQ_RESET, 0);
17241 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17242 test_desc.addr_lo = buf_dma & 0xffffffff;
17243 test_desc.nic_mbuf = 0x00002100;
17244 test_desc.len = size;
17247 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
17248 * the *second* time the tg3 driver was getting loaded after an
17251 * Broadcom tells me:
17252 * ...the DMA engine is connected to the GRC block and a DMA
17253 * reset may affect the GRC block in some unpredictable way...
17254 * The behavior of resets to individual blocks has not been tested.
17256 * Broadcom noted the GRC reset will also reset all sub-components.
17259 test_desc.cqid_sqid = (13 << 8) | 2;
17261 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17264 test_desc.cqid_sqid = (16 << 8) | 7;
17266 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17269 test_desc.flags = 0x00000005;
17271 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17274 val = *(((u32 *)&test_desc) + i);
17275 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17276 sram_dma_descs + (i * sizeof(u32)));
17277 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17279 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17282 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17284 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17287 for (i = 0; i < 40; i++) {
17291 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17293 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17294 if ((val & 0xffff) == sram_dma_descs) {
17305 #define TEST_BUFFER_SIZE 0x2000
17307 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17308 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17312 static int tg3_test_dma(struct tg3 *tp)
17314 dma_addr_t buf_dma;
17315 u32 *buf, saved_dma_rwctrl;
17318 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17319 &buf_dma, GFP_KERNEL);
17325 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17326 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17328 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17330 if (tg3_flag(tp, 57765_PLUS))
17333 if (tg3_flag(tp, PCI_EXPRESS)) {
17334 /* DMA read watermark not used on PCIE */
17335 tp->dma_rwctrl |= 0x00180000;
17336 } else if (!tg3_flag(tp, PCIX_MODE)) {
17337 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17338 tg3_asic_rev(tp) == ASIC_REV_5750)
17339 tp->dma_rwctrl |= 0x003f0000;
17341 tp->dma_rwctrl |= 0x003f000f;
17343 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17344 tg3_asic_rev(tp) == ASIC_REV_5704) {
17345 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17346 u32 read_water = 0x7;
17348 /* If the 5704 is behind the EPB bridge, we can
17349 * do the less restrictive ONE_DMA workaround for
17350 * better performance.
17352 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17353 tg3_asic_rev(tp) == ASIC_REV_5704)
17354 tp->dma_rwctrl |= 0x8000;
17355 else if (ccval == 0x6 || ccval == 0x7)
17356 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17358 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17360 /* Set bit 23 to enable PCIX hw bug fix */
17362 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17363 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17365 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17366 /* 5780 always in PCIX mode */
17367 tp->dma_rwctrl |= 0x00144000;
17368 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17369 /* 5714 always in PCIX mode */
17370 tp->dma_rwctrl |= 0x00148000;
17372 tp->dma_rwctrl |= 0x001b000f;
17375 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17376 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17378 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17379 tg3_asic_rev(tp) == ASIC_REV_5704)
17380 tp->dma_rwctrl &= 0xfffffff0;
17382 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17383 tg3_asic_rev(tp) == ASIC_REV_5701) {
17384 /* Remove this if it causes problems for some boards. */
17385 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17387 /* On 5700/5701 chips, we need to set this bit.
17388 * Otherwise the chip will issue cacheline transactions
17389 * to streamable DMA memory with not all the byte
17390 * enables turned on. This is an error on several
17391 * RISC PCI controllers, in particular sparc64.
17393 * On 5703/5704 chips, this bit has been reassigned
17394 * a different meaning. In particular, it is used
17395 * on those chips to enable a PCI-X workaround.
17397 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17400 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17403 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17404 tg3_asic_rev(tp) != ASIC_REV_5701)
17407 /* It is best to perform DMA test with maximum write burst size
17408 * to expose the 5700/5701 write DMA bug.
17410 saved_dma_rwctrl = tp->dma_rwctrl;
17411 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17412 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17417 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17420 /* Send the buffer to the chip. */
17421 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17423 dev_err(&tp->pdev->dev,
17424 "%s: Buffer write failed. err = %d\n",
17429 /* Now read it back. */
17430 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17432 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17433 "err = %d\n", __func__, ret);
17438 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17442 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17443 DMA_RWCTRL_WRITE_BNDRY_16) {
17444 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17445 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17446 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17449 dev_err(&tp->pdev->dev,
17450 "%s: Buffer corrupted on read back! "
17451 "(%d != %d)\n", __func__, p[i], i);
17457 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17463 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17464 DMA_RWCTRL_WRITE_BNDRY_16) {
17465 /* DMA test passed without adjusting DMA boundary,
17466 * now look for chipsets that are known to expose the
17467 * DMA bug without failing the test.
17469 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17470 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17471 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17473 /* Safe to use the calculated DMA boundary. */
17474 tp->dma_rwctrl = saved_dma_rwctrl;
17477 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17481 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17486 static void tg3_init_bufmgr_config(struct tg3 *tp)
17488 if (tg3_flag(tp, 57765_PLUS)) {
17489 tp->bufmgr_config.mbuf_read_dma_low_water =
17490 DEFAULT_MB_RDMA_LOW_WATER_5705;
17491 tp->bufmgr_config.mbuf_mac_rx_low_water =
17492 DEFAULT_MB_MACRX_LOW_WATER_57765;
17493 tp->bufmgr_config.mbuf_high_water =
17494 DEFAULT_MB_HIGH_WATER_57765;
17496 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17497 DEFAULT_MB_RDMA_LOW_WATER_5705;
17498 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17499 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17500 tp->bufmgr_config.mbuf_high_water_jumbo =
17501 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17502 } else if (tg3_flag(tp, 5705_PLUS)) {
17503 tp->bufmgr_config.mbuf_read_dma_low_water =
17504 DEFAULT_MB_RDMA_LOW_WATER_5705;
17505 tp->bufmgr_config.mbuf_mac_rx_low_water =
17506 DEFAULT_MB_MACRX_LOW_WATER_5705;
17507 tp->bufmgr_config.mbuf_high_water =
17508 DEFAULT_MB_HIGH_WATER_5705;
17509 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17510 tp->bufmgr_config.mbuf_mac_rx_low_water =
17511 DEFAULT_MB_MACRX_LOW_WATER_5906;
17512 tp->bufmgr_config.mbuf_high_water =
17513 DEFAULT_MB_HIGH_WATER_5906;
17516 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17517 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17518 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17519 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17520 tp->bufmgr_config.mbuf_high_water_jumbo =
17521 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17523 tp->bufmgr_config.mbuf_read_dma_low_water =
17524 DEFAULT_MB_RDMA_LOW_WATER;
17525 tp->bufmgr_config.mbuf_mac_rx_low_water =
17526 DEFAULT_MB_MACRX_LOW_WATER;
17527 tp->bufmgr_config.mbuf_high_water =
17528 DEFAULT_MB_HIGH_WATER;
17530 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17531 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17532 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17533 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17534 tp->bufmgr_config.mbuf_high_water_jumbo =
17535 DEFAULT_MB_HIGH_WATER_JUMBO;
17538 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17539 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17542 static char *tg3_phy_string(struct tg3 *tp)
17544 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17545 case TG3_PHY_ID_BCM5400: return "5400";
17546 case TG3_PHY_ID_BCM5401: return "5401";
17547 case TG3_PHY_ID_BCM5411: return "5411";
17548 case TG3_PHY_ID_BCM5701: return "5701";
17549 case TG3_PHY_ID_BCM5703: return "5703";
17550 case TG3_PHY_ID_BCM5704: return "5704";
17551 case TG3_PHY_ID_BCM5705: return "5705";
17552 case TG3_PHY_ID_BCM5750: return "5750";
17553 case TG3_PHY_ID_BCM5752: return "5752";
17554 case TG3_PHY_ID_BCM5714: return "5714";
17555 case TG3_PHY_ID_BCM5780: return "5780";
17556 case TG3_PHY_ID_BCM5755: return "5755";
17557 case TG3_PHY_ID_BCM5787: return "5787";
17558 case TG3_PHY_ID_BCM5784: return "5784";
17559 case TG3_PHY_ID_BCM5756: return "5722/5756";
17560 case TG3_PHY_ID_BCM5906: return "5906";
17561 case TG3_PHY_ID_BCM5761: return "5761";
17562 case TG3_PHY_ID_BCM5718C: return "5718C";
17563 case TG3_PHY_ID_BCM5718S: return "5718S";
17564 case TG3_PHY_ID_BCM57765: return "57765";
17565 case TG3_PHY_ID_BCM5719C: return "5719C";
17566 case TG3_PHY_ID_BCM5720C: return "5720C";
17567 case TG3_PHY_ID_BCM5762: return "5762C";
17568 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17569 case 0: return "serdes";
17570 default: return "unknown";
17574 static char *tg3_bus_string(struct tg3 *tp, char *str)
17576 if (tg3_flag(tp, PCI_EXPRESS)) {
17577 strcpy(str, "PCI Express");
17579 } else if (tg3_flag(tp, PCIX_MODE)) {
17580 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17582 strcpy(str, "PCIX:");
17584 if ((clock_ctrl == 7) ||
17585 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17586 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17587 strcat(str, "133MHz");
17588 else if (clock_ctrl == 0)
17589 strcat(str, "33MHz");
17590 else if (clock_ctrl == 2)
17591 strcat(str, "50MHz");
17592 else if (clock_ctrl == 4)
17593 strcat(str, "66MHz");
17594 else if (clock_ctrl == 6)
17595 strcat(str, "100MHz");
17597 strcpy(str, "PCI:");
17598 if (tg3_flag(tp, PCI_HIGH_SPEED))
17599 strcat(str, "66MHz");
17601 strcat(str, "33MHz");
17603 if (tg3_flag(tp, PCI_32BIT))
17604 strcat(str, ":32-bit");
17606 strcat(str, ":64-bit");
17610 static void tg3_init_coal(struct tg3 *tp)
17612 struct ethtool_coalesce *ec = &tp->coal;
17614 memset(ec, 0, sizeof(*ec));
17615 ec->cmd = ETHTOOL_GCOALESCE;
17616 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17617 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17618 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17619 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17620 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17621 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17622 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17623 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17624 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17626 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17627 HOSTCC_MODE_CLRTICK_TXBD)) {
17628 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17629 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17630 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17631 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17634 if (tg3_flag(tp, 5705_PLUS)) {
17635 ec->rx_coalesce_usecs_irq = 0;
17636 ec->tx_coalesce_usecs_irq = 0;
17637 ec->stats_block_coalesce_usecs = 0;
17641 static int tg3_init_one(struct pci_dev *pdev,
17642 const struct pci_device_id *ent)
17644 struct net_device *dev;
17647 u32 sndmbx, rcvmbx, intmbx;
17649 u64 dma_mask, persist_dma_mask;
17650 netdev_features_t features = 0;
17652 printk_once(KERN_INFO "%s\n", version);
17654 err = pci_enable_device(pdev);
17656 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17660 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17662 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17663 goto err_out_disable_pdev;
17666 pci_set_master(pdev);
17668 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17671 goto err_out_free_res;
17674 SET_NETDEV_DEV(dev, &pdev->dev);
17676 tp = netdev_priv(dev);
17679 tp->rx_mode = TG3_DEF_RX_MODE;
17680 tp->tx_mode = TG3_DEF_TX_MODE;
17682 tp->pcierr_recovery = false;
17685 tp->msg_enable = tg3_debug;
17687 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17689 if (pdev_is_ssb_gige_core(pdev)) {
17690 tg3_flag_set(tp, IS_SSB_CORE);
17691 if (ssb_gige_must_flush_posted_writes(pdev))
17692 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17693 if (ssb_gige_one_dma_at_once(pdev))
17694 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17695 if (ssb_gige_have_roboswitch(pdev)) {
17696 tg3_flag_set(tp, USE_PHYLIB);
17697 tg3_flag_set(tp, ROBOSWITCH);
17699 if (ssb_gige_is_rgmii(pdev))
17700 tg3_flag_set(tp, RGMII_MODE);
17703 /* The word/byte swap controls here control register access byte
17704 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17707 tp->misc_host_ctrl =
17708 MISC_HOST_CTRL_MASK_PCI_INT |
17709 MISC_HOST_CTRL_WORD_SWAP |
17710 MISC_HOST_CTRL_INDIR_ACCESS |
17711 MISC_HOST_CTRL_PCISTATE_RW;
17713 /* The NONFRM (non-frame) byte/word swap controls take effect
17714 * on descriptor entries, anything which isn't packet data.
17716 * The StrongARM chips on the board (one for tx, one for rx)
17717 * are running in big-endian mode.
17719 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17720 GRC_MODE_WSWAP_NONFRM_DATA);
17721 #ifdef __BIG_ENDIAN
17722 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17724 spin_lock_init(&tp->lock);
17725 spin_lock_init(&tp->indirect_lock);
17726 INIT_WORK(&tp->reset_task, tg3_reset_task);
17728 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17730 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17732 goto err_out_free_dev;
17735 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17736 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17737 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17738 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17739 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17740 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17741 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17742 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17743 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17744 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17745 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17746 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17747 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17748 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17749 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17750 tg3_flag_set(tp, ENABLE_APE);
17751 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17752 if (!tp->aperegs) {
17753 dev_err(&pdev->dev,
17754 "Cannot map APE registers, aborting\n");
17756 goto err_out_iounmap;
17760 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17761 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17763 dev->ethtool_ops = &tg3_ethtool_ops;
17764 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17765 dev->netdev_ops = &tg3_netdev_ops;
17766 dev->irq = pdev->irq;
17768 err = tg3_get_invariants(tp, ent);
17770 dev_err(&pdev->dev,
17771 "Problem fetching invariants of chip, aborting\n");
17772 goto err_out_apeunmap;
17775 /* The EPB bridge inside 5714, 5715, and 5780 and any
17776 * device behind the EPB cannot support DMA addresses > 40-bit.
17777 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17778 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17779 * do DMA address check in tg3_start_xmit().
17781 if (tg3_flag(tp, IS_5788))
17782 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17783 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17784 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17785 #ifdef CONFIG_HIGHMEM
17786 dma_mask = DMA_BIT_MASK(64);
17789 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17791 /* Configure DMA attributes. */
17792 if (dma_mask > DMA_BIT_MASK(32)) {
17793 err = pci_set_dma_mask(pdev, dma_mask);
17795 features |= NETIF_F_HIGHDMA;
17796 err = pci_set_consistent_dma_mask(pdev,
17799 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17800 "DMA for consistent allocations\n");
17801 goto err_out_apeunmap;
17805 if (err || dma_mask == DMA_BIT_MASK(32)) {
17806 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17808 dev_err(&pdev->dev,
17809 "No usable DMA configuration, aborting\n");
17810 goto err_out_apeunmap;
17814 tg3_init_bufmgr_config(tp);
17816 /* 5700 B0 chips do not support checksumming correctly due
17817 * to hardware bugs.
17819 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17820 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17822 if (tg3_flag(tp, 5755_PLUS))
17823 features |= NETIF_F_IPV6_CSUM;
17826 /* TSO is on by default on chips that support hardware TSO.
17827 * Firmware TSO on older chips gives lower performance, so it
17828 * is off by default, but can be enabled using ethtool.
17830 if ((tg3_flag(tp, HW_TSO_1) ||
17831 tg3_flag(tp, HW_TSO_2) ||
17832 tg3_flag(tp, HW_TSO_3)) &&
17833 (features & NETIF_F_IP_CSUM))
17834 features |= NETIF_F_TSO;
17835 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17836 if (features & NETIF_F_IPV6_CSUM)
17837 features |= NETIF_F_TSO6;
17838 if (tg3_flag(tp, HW_TSO_3) ||
17839 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17840 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17841 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17842 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17843 tg3_asic_rev(tp) == ASIC_REV_57780)
17844 features |= NETIF_F_TSO_ECN;
17847 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17848 NETIF_F_HW_VLAN_CTAG_RX;
17849 dev->vlan_features |= features;
17852 * Add loopback capability only for a subset of devices that support
17853 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17854 * loopback for the remaining devices.
17856 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17857 !tg3_flag(tp, CPMU_PRESENT))
17858 /* Add the loopback capability */
17859 features |= NETIF_F_LOOPBACK;
17861 dev->hw_features |= features;
17862 dev->priv_flags |= IFF_UNICAST_FLT;
17864 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17865 dev->min_mtu = TG3_MIN_MTU;
17866 dev->max_mtu = TG3_MAX_MTU(tp);
17868 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17869 !tg3_flag(tp, TSO_CAPABLE) &&
17870 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17871 tg3_flag_set(tp, MAX_RXPEND_64);
17872 tp->rx_pending = 63;
17875 err = tg3_get_device_address(tp);
17877 dev_err(&pdev->dev,
17878 "Could not obtain valid ethernet address, aborting\n");
17879 goto err_out_apeunmap;
17882 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17883 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17884 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17885 for (i = 0; i < tp->irq_max; i++) {
17886 struct tg3_napi *tnapi = &tp->napi[i];
17889 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17891 tnapi->int_mbox = intmbx;
17897 tnapi->consmbox = rcvmbx;
17898 tnapi->prodmbox = sndmbx;
17901 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17903 tnapi->coal_now = HOSTCC_MODE_NOW;
17905 if (!tg3_flag(tp, SUPPORT_MSIX))
17909 * If we support MSIX, we'll be using RSS. If we're using
17910 * RSS, the first vector only handles link interrupts and the
17911 * remaining vectors handle rx and tx interrupts. Reuse the
17912 * mailbox values for the next iteration. The values we setup
17913 * above are still useful for the single vectored mode.
17927 * Reset chip in case UNDI or EFI driver did not shutdown
17928 * DMA self test will enable WDMAC and we'll see (spurious)
17929 * pending DMA on the PCI bus at that point.
17931 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17932 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17933 tg3_full_lock(tp, 0);
17934 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17935 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17936 tg3_full_unlock(tp);
17939 err = tg3_test_dma(tp);
17941 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17942 goto err_out_apeunmap;
17947 pci_set_drvdata(pdev, dev);
17949 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17950 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17951 tg3_asic_rev(tp) == ASIC_REV_5762)
17952 tg3_flag_set(tp, PTP_CAPABLE);
17954 tg3_timer_init(tp);
17956 tg3_carrier_off(tp);
17958 err = register_netdev(dev);
17960 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17961 goto err_out_apeunmap;
17964 if (tg3_flag(tp, PTP_CAPABLE)) {
17966 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17968 if (IS_ERR(tp->ptp_clock))
17969 tp->ptp_clock = NULL;
17972 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17973 tp->board_part_number,
17974 tg3_chip_rev_id(tp),
17975 tg3_bus_string(tp, str),
17978 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17981 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17982 ethtype = "10/100Base-TX";
17983 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17984 ethtype = "1000Base-SX";
17986 ethtype = "10/100/1000Base-T";
17988 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17989 "(WireSpeed[%d], EEE[%d])\n",
17990 tg3_phy_string(tp), ethtype,
17991 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17992 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17995 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17996 (dev->features & NETIF_F_RXCSUM) != 0,
17997 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17998 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17999 tg3_flag(tp, ENABLE_ASF) != 0,
18000 tg3_flag(tp, TSO_CAPABLE) != 0);
18001 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
18003 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
18004 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
18006 pci_save_state(pdev);
18012 iounmap(tp->aperegs);
18013 tp->aperegs = NULL;
18026 pci_release_regions(pdev);
18028 err_out_disable_pdev:
18029 if (pci_is_enabled(pdev))
18030 pci_disable_device(pdev);
18034 static void tg3_remove_one(struct pci_dev *pdev)
18036 struct net_device *dev = pci_get_drvdata(pdev);
18039 struct tg3 *tp = netdev_priv(dev);
18043 release_firmware(tp->fw);
18045 tg3_reset_task_cancel(tp);
18047 if (tg3_flag(tp, USE_PHYLIB)) {
18052 unregister_netdev(dev);
18054 iounmap(tp->aperegs);
18055 tp->aperegs = NULL;
18062 pci_release_regions(pdev);
18063 pci_disable_device(pdev);
18067 #ifdef CONFIG_PM_SLEEP
18068 static int tg3_suspend(struct device *device)
18070 struct pci_dev *pdev = to_pci_dev(device);
18071 struct net_device *dev = pci_get_drvdata(pdev);
18072 struct tg3 *tp = netdev_priv(dev);
18077 if (!netif_running(dev))
18080 tg3_reset_task_cancel(tp);
18082 tg3_netif_stop(tp);
18084 tg3_timer_stop(tp);
18086 tg3_full_lock(tp, 1);
18087 tg3_disable_ints(tp);
18088 tg3_full_unlock(tp);
18090 netif_device_detach(dev);
18092 tg3_full_lock(tp, 0);
18093 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18094 tg3_flag_clear(tp, INIT_COMPLETE);
18095 tg3_full_unlock(tp);
18097 err = tg3_power_down_prepare(tp);
18101 tg3_full_lock(tp, 0);
18103 tg3_flag_set(tp, INIT_COMPLETE);
18104 err2 = tg3_restart_hw(tp, true);
18108 tg3_timer_start(tp);
18110 netif_device_attach(dev);
18111 tg3_netif_start(tp);
18114 tg3_full_unlock(tp);
18125 static int tg3_resume(struct device *device)
18127 struct pci_dev *pdev = to_pci_dev(device);
18128 struct net_device *dev = pci_get_drvdata(pdev);
18129 struct tg3 *tp = netdev_priv(dev);
18134 if (!netif_running(dev))
18137 netif_device_attach(dev);
18139 tg3_full_lock(tp, 0);
18141 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18143 tg3_flag_set(tp, INIT_COMPLETE);
18144 err = tg3_restart_hw(tp,
18145 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18149 tg3_timer_start(tp);
18151 tg3_netif_start(tp);
18154 tg3_full_unlock(tp);
18163 #endif /* CONFIG_PM_SLEEP */
18165 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18167 static void tg3_shutdown(struct pci_dev *pdev)
18169 struct net_device *dev = pci_get_drvdata(pdev);
18170 struct tg3 *tp = netdev_priv(dev);
18173 netif_device_detach(dev);
18175 if (netif_running(dev))
18178 if (system_state == SYSTEM_POWER_OFF)
18179 tg3_power_down(tp);
18185 * tg3_io_error_detected - called when PCI error is detected
18186 * @pdev: Pointer to PCI device
18187 * @state: The current pci connection state
18189 * This function is called after a PCI bus error affecting
18190 * this device has been detected.
18192 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18193 pci_channel_state_t state)
18195 struct net_device *netdev = pci_get_drvdata(pdev);
18196 struct tg3 *tp = netdev_priv(netdev);
18197 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18199 netdev_info(netdev, "PCI I/O error detected\n");
18203 /* We probably don't have netdev yet */
18204 if (!netdev || !netif_running(netdev))
18207 /* We needn't recover from permanent error */
18208 if (state == pci_channel_io_frozen)
18209 tp->pcierr_recovery = true;
18213 tg3_netif_stop(tp);
18215 tg3_timer_stop(tp);
18217 /* Want to make sure that the reset task doesn't run */
18218 tg3_reset_task_cancel(tp);
18220 netif_device_detach(netdev);
18222 /* Clean up software state, even if MMIO is blocked */
18223 tg3_full_lock(tp, 0);
18224 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18225 tg3_full_unlock(tp);
18228 if (state == pci_channel_io_perm_failure) {
18230 tg3_napi_enable(tp);
18233 err = PCI_ERS_RESULT_DISCONNECT;
18235 pci_disable_device(pdev);
18244 * tg3_io_slot_reset - called after the pci bus has been reset.
18245 * @pdev: Pointer to PCI device
18247 * Restart the card from scratch, as if from a cold-boot.
18248 * At this point, the card has exprienced a hard reset,
18249 * followed by fixups by BIOS, and has its config space
18250 * set up identically to what it was at cold boot.
18252 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18254 struct net_device *netdev = pci_get_drvdata(pdev);
18255 struct tg3 *tp = netdev_priv(netdev);
18256 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18261 if (pci_enable_device(pdev)) {
18262 dev_err(&pdev->dev,
18263 "Cannot re-enable PCI device after reset.\n");
18267 pci_set_master(pdev);
18268 pci_restore_state(pdev);
18269 pci_save_state(pdev);
18271 if (!netdev || !netif_running(netdev)) {
18272 rc = PCI_ERS_RESULT_RECOVERED;
18276 err = tg3_power_up(tp);
18280 rc = PCI_ERS_RESULT_RECOVERED;
18283 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18284 tg3_napi_enable(tp);
18293 * tg3_io_resume - called when traffic can start flowing again.
18294 * @pdev: Pointer to PCI device
18296 * This callback is called when the error recovery driver tells
18297 * us that its OK to resume normal operation.
18299 static void tg3_io_resume(struct pci_dev *pdev)
18301 struct net_device *netdev = pci_get_drvdata(pdev);
18302 struct tg3 *tp = netdev_priv(netdev);
18307 if (!netdev || !netif_running(netdev))
18310 tg3_full_lock(tp, 0);
18311 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18312 tg3_flag_set(tp, INIT_COMPLETE);
18313 err = tg3_restart_hw(tp, true);
18315 tg3_full_unlock(tp);
18316 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18320 netif_device_attach(netdev);
18322 tg3_timer_start(tp);
18324 tg3_netif_start(tp);
18326 tg3_full_unlock(tp);
18331 tp->pcierr_recovery = false;
18335 static const struct pci_error_handlers tg3_err_handler = {
18336 .error_detected = tg3_io_error_detected,
18337 .slot_reset = tg3_io_slot_reset,
18338 .resume = tg3_io_resume
18341 static struct pci_driver tg3_driver = {
18342 .name = DRV_MODULE_NAME,
18343 .id_table = tg3_pci_tbl,
18344 .probe = tg3_init_one,
18345 .remove = tg3_remove_one,
18346 .err_handler = &tg3_err_handler,
18347 .driver.pm = &tg3_pm_ops,
18348 .shutdown = tg3_shutdown,
18351 module_pci_driver(tg3_driver);