/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			117
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"January 25, 2011"
#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	((tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	((tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
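/* Because TG3_TX_RING_SIZE is a power of two, the '& (TG3_TX_RING_SIZE - 1)'
 * in NEXT_TX() is the mask form of '% TG3_TX_RING_SIZE' that the comment
 * above alludes to; e.g. NEXT_TX(511) wraps back to 0 with a single AND.
 */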
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "mbuf_lwm_thresh_hit" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
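/* A sketch of the indirect window used above: a config-space write to
 * TG3PCI_REG_BASE_ADDR selects the target register, and the chip forwards
 * the following TG3PCI_REG_DATA access to it.  indirect_lock serializes
 * the two-step sequence against other users of the window.
 */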
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
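/* All register access in this driver funnels through the tw32()/tr32()
 * family above; the tp->write32/tp->read32 method pointers are bound during
 * device setup to either the direct (tg3_write32) or indirect
 * (tg3_write_indirect_reg32) variants, depending on which workarounds a
 * given chip needs.
 */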
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
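/* Writing a non-zero value to an interrupt mailbox masks that vector;
 * tg3_enable_ints() below re-enables it by writing the last status tag
 * shifted into the upper byte of the same mailbox.
 */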
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
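/* A usage sketch for the MI_COM interface above (only names already used in
 * this file): the caller packs the PHY and register addresses plus a command
 * into MAC_MI_COM, then polls MI_COM_BUSY.  For example, reading the basic
 * mode control register:
 *
 *	u32 bmcr;
 *	if (!tg3_readphy(tp, MII_BMCR, &bmcr) && (bmcr & BMCR_ANENABLE))
 *		...autonegotiation is enabled...
 */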
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);
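/* MII_TG3_AUX_CTRL is a banked register: the low bits written select a
 * shadow bank, which is why tg3_phy_auxctl_read() above must write the
 * bank select before reading the value back.  The two macros above are
 * shorthand for turning the DSP access bank on and off around the
 * tg3_phydsp_*() call sequences used below.
 */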
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}
static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
		u32 is_serdes;

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500
/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
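/* The resolution above follows the usual IEEE 802.3 pause autoneg table
 * (a sketch of the intent, not normative text): when both sides advertise
 * symmetric PAUSE, flow control is enabled in both directions; a local
 * ASYM-only advertisement yields TX-only flow control, and only when the
 * link partner advertises both PAUSE and ASYM.
 */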
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		switch (val) {
		case TG3_CL45_D7_EEERES_STAT_LP_1000T:
			switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
			case ASIC_REV_5717:
			case ASIC_REV_5719:
			case ASIC_REV_57765:
				if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
					tg3_phydsp_write(tp, MII_TG3_DSP_TAP26,
							 0x0000);
					TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
				}
			}
			/* Fallthrough */
		case TG3_CL45_D7_EEERES_STAT_LP_100TX:
			tp->setlpicnt = 2;
		}
	}

	if (!tp->setlpicnt) {
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					     val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
static void tg3_frob_aux_power(struct tg3 *tp)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
	    tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE)
				return;

			if ((tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
			    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		need_vaux = true;

	if (need_vaux) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);

static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}

/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}

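/* Note: the software arbitration above polls SWARB_GNT1 up to 8000
 * times with a 20 usec delay, so a request that is never granted
 * gives up after roughly 160 ms.  The lock is recursive within the
 * driver via nvram_lock_cnt; only the outermost unlock releases the
 * hardware semaphore.
 */
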
/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}

/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}

static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}

#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}

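/* With NVRAM_CMD_TIMEOUT polls of 10 usec each, tg3_nvram_exec_cmd()
 * bounds a single NVRAM command at roughly 100 ms before returning
 * -EBUSY.  A caller issuing a one-word read looks like:
 *
 *	tw32(NVRAM_ADDR, offset);
 *	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
 *				 NVRAM_CMD_FIRST | NVRAM_CMD_LAST |
 *				 NVRAM_CMD_DONE);
 *
 * which is exactly the sequence tg3_nvram_read() below uses.
 */
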
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
	    !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
	    !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}

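/* Worked example, assuming an Atmel part with 264-byte pages
 * (nvram_pagesize == 264) and ATMEL_AT45DB0X1B_PAGE_POS == 9:
 * logical address 1000 sits in page 3 (1000 / 264) at offset 208
 * (1000 % 264), so tg3_nvram_phys_addr() yields (3 << 9) + 208 =
 * 0x6d0.  tg3_nvram_logical_addr() inverts that mapping.
 */
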
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}

/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}

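/* Example: tg3_nvram_read() already byteswaps on LE hosts, so after
 * the cpu_to_be32() above the bytes of *val are in the same order as
 * they appear in NVRAM on both LE and BE machines, which is what
 * callers that treat the data as a byte stream (e.g. MAC address or
 * VPD parsing) rely on.
 */
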
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}

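/* Example packing, for a hypothetical MAC address 00:10:18:aa:bb:cc:
 * addr_high = 0x0010 (bytes 0-1) and addr_low = 0x18aabbcc
 * (bytes 2-5).  The same value is mirrored into all four MAC_ADDR
 * slots (minus slot 1 when skip_mac_1 is set), and the byte sum,
 * masked by TX_BACKOFF_SEED_MASK, seeds the transmit backoff
 * random number generator.
 */
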
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}

static int tg3_power_up(struct tg3 *tp)
{
	tg3_enable_register_access(tp);

	pci_set_power_state(tp->pdev, PCI_D0);

	/* Switch out of Vaux if it is a NIC */
	if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

	return 0;
}

static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     lnkctl);
	}

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
			    device_should_wake) {
				if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
		     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}

static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
	pci_set_power_state(tp->pdev, PCI_D3hot);
}

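/* tg3_power_down() is the WoL-aware shutdown path: after the prepare
 * step above has programmed the MAC/PHY for wake-up, pci_wake_from_d3()
 * arms PME only when TG3_FLAG_WOL_ENABLE is set, and the device is
 * then left in D3hot.
 */
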
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	}
}

static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
		u32 val;

		tw32(TG3_CPMU_EEE_MODE,
		     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

		TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
			/* Fall through */
		case ASIC_REV_5719:
			val = MII_TG3_DSP_TAP26_ALNOKO |
			      MII_TG3_DSP_TAP26_RMRXSTO |
			      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		}

		val = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Advertise 100-BaseTX EEE ability */
			if (tp->link_config.advertising &
			    ADVERTISED_100baseT_Full)
				val |= MDIO_AN_EEE_ADV_100TX;
			/* Advertise 1000-BaseT EEE ability */
			if (tp->link_config.advertising &
			    ADVERTISED_1000baseT_Full)
				val |= MDIO_AN_EEE_ADV_1000T;
		}
		tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);

		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}

static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}

static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
{
	u32 adv_reg, all_mask = 0;

	if (mask & ADVERTISED_10baseT_Half)
		all_mask |= ADVERTISE_10HALF;
	if (mask & ADVERTISED_10baseT_Full)
		all_mask |= ADVERTISE_10FULL;
	if (mask & ADVERTISED_100baseT_Half)
		all_mask |= ADVERTISE_100HALF;
	if (mask & ADVERTISED_100baseT_Full)
		all_mask |= ADVERTISE_100FULL;

	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
		return 0;

	if ((adv_reg & all_mask) != all_mask)
		return 0;
	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		all_mask = 0;
		if (mask & ADVERTISED_1000baseT_Half)
			all_mask |= ADVERTISE_1000HALF;
		if (mask & ADVERTISED_1000baseT_Full)
			all_mask |= ADVERTISE_1000FULL;

		if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
			return 0;

		if ((tg3_ctrl & all_mask) != all_mask)
			return 0;
	}
	return 1;
}

static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
{
	u32 curadv, reqadv;

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return 1;

	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		if (curadv != reqadv)
			return 0;

		if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
			tg3_readphy(tp, MII_LPA, rmtadv);
	} else {
		/* Reprogram the advertisement register, even if it
		 * does not affect the current link.  If the link
		 * gets renegotiated in the future, we can save an
		 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
		if (curadv != reqadv) {
			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
				     ADVERTISE_PAUSE_ASYM);
			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
		}
	}

	return 1;
}

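/* The curadv/reqadv comparison above relies on the usual IEEE 802.3
 * Annex 28B pause encoding: advertising both RX and TX flow control
 * maps to ADVERTISE_PAUSE_CAP, RX-only to ADVERTISE_PAUSE_CAP |
 * ADVERTISE_PAUSE_ASYM, and TX-only to ADVERTISE_PAUSE_ASYM alone,
 * which is what tg3_advert_flowctrl_1000T() computes from
 * link_config.flowctrl.
 */
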
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	/* The link status bit in MII_BMSR is latched-low, so each pair of
	 * reads below discards the stale latched value and then samples
	 * the current state.
	 */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
							      &rmt_adv))
					current_link_up = 1;
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			current_link_up = 1;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
		u16 oldlnkctl, newlnkctl;

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      tp->pcie_cap + PCI_EXP_LNKCTL,
					      newlnkctl);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}

struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000

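/* The state machine below is a software rendition of the IEEE 802.3z
 * Clause 37 autonegotiation arbitration process for 1000BASE-X links:
 * /C/ config code words are exchanged through the MAC_TX_AUTO_NEG and
 * MAC_RX_AUTO_NEG registers, and ANEG_STATE_SETTLE_TIME stands in for
 * the spec's link_timer.  cur_time is counted in state machine ticks,
 * not jiffies.
 */
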
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}

static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}

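/* fiber_autoneg() drives the state machine synchronously: with up to
 * 195000 iterations of at least 1 usec each, autonegotiation is given
 * on the order of 200 ms to reach ANEG_DONE before the result flags
 * are handed back to the caller.
 */
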
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}

static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}

static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}

static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}

static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			} else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}

static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}

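/* Parallel detection in a nutshell: if the PHY reports signal detect
 * (shadow register bit 0x10) while no /C/ config code words are
 * arriving (expansion register bit 0x20 clear), the link partner is
 * not autonegotiating, so force 1000/full; as soon as code words show
 * up again, re-enable autoneg.  This mirrors in software what the
 * SERDES hardware autoneg path in tg3_setup_fiber_hw_autoneg() does.
 */
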
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}

static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	int i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}

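/* Note the dst adjustment above: the dump buffer is indexed by
 * register offset, so a read of len bytes starting at register off
 * lands at regs[off / 4] through regs[(off + len) / 4 - 1].  For
 * example, the tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0) call below
 * fills the slice of the buffer corresponding to offsets MAC_MODE
 * through MAC_MODE + 0x4ec.
 */
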
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX)
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}

static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}

4607 /* This is called whenever we suspect that the system chipset is re-
4608 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4609 * is bogus tx completions. We try to recover by setting the
4610 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4613 static void tg3_tx_recover(struct tg3 *tp)
4615 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4616 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4618 netdev_warn(tp->dev,
4619 "The system may be re-ordering memory-mapped I/O "
4620 "cycles to the network device, attempting to recover. "
4621 "Please report the problem to the driver maintainer "
4622 "and include system chipset information.\n");
4624 spin_lock(&tp->lock);
4625 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4626 spin_unlock(&tp->lock);
4629 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4631 /* Tell compiler to fetch tx indices from memory. */
4633 return tnapi->tx_pending -
4634 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
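/* Worked example (hypothetical values): with the 512-entry tx ring,
 * tx_prod = 5 and tx_cons = 510 give (5 - 510) & 511 = 7 descriptors
 * still in flight, so with tx_pending = 511 this returns 504 free
 * slots.
 */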
4637 /* Tigon3 never reports partial packet sends. So we do not
4638 * need special logic to handle SKBs that have not had all
4639 * of their frags sent yet, like SunGEM does.
4641 static void tg3_tx(struct tg3_napi *tnapi)
4643 struct tg3 *tp = tnapi->tp;
4644 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4645 u32 sw_idx = tnapi->tx_cons;
4646 struct netdev_queue *txq;
4647 int index = tnapi - tp->napi;
4649 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
4652 txq = netdev_get_tx_queue(tp->dev, index);
4654 while (sw_idx != hw_idx) {
4655 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4656 struct sk_buff *skb = ri->skb;
4659 if (unlikely(skb == NULL)) {
4664 pci_unmap_single(tp->pdev,
4665 dma_unmap_addr(ri, mapping),
4671 sw_idx = NEXT_TX(sw_idx);
4673 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4674 ri = &tnapi->tx_buffers[sw_idx];
4675 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4678 pci_unmap_page(tp->pdev,
4679 dma_unmap_addr(ri, mapping),
4680 skb_shinfo(skb)->frags[i].size,
4682 sw_idx = NEXT_TX(sw_idx);
4687 if (unlikely(tx_bug)) {
4693 tnapi->tx_cons = sw_idx;
4695 /* Need to make the tx_cons update visible to tg3_start_xmit()
4696 * before checking for netif_queue_stopped(). Without the
4697 * memory barrier, there is a small possibility that tg3_start_xmit()
4698 * will miss it and cause the queue to be stopped forever.
4702 if (unlikely(netif_tx_queue_stopped(txq) &&
4703 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4704 __netif_tx_lock(txq, smp_processor_id());
4705 if (netif_tx_queue_stopped(txq) &&
4706 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4707 netif_tx_wake_queue(txq);
4708 __netif_tx_unlock(txq);
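/* The stop/wake handshake above mirrors tg3_start_xmit(): the
 * producer stops the queue and then re-checks tg3_tx_avail(), while
 * this consumer publishes tx_cons and then re-checks the stopped
 * state under the tx lock, so a wakeup cannot be lost between the
 * two sides.
 */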
4712 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4717 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4718 map_sz, PCI_DMA_FROMDEVICE);
4719 dev_kfree_skb_any(ri->skb);
4723 /* Returns size of skb allocated or < 0 on error.
4725 * We only need to fill in the address because the other members
4726 * of the RX descriptor are invariant; see tg3_init_rings.
4728 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4729 * posting buffers we only dirty the first cache line of the RX
4730 * descriptor (containing the address). Whereas for the RX status
4731 * buffers the cpu only reads the last cacheline of the RX descriptor
4732 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4734 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4735 u32 opaque_key, u32 dest_idx_unmasked)
4737 struct tg3_rx_buffer_desc *desc;
4738 struct ring_info *map;
4739 struct sk_buff *skb;
4741 int skb_size, dest_idx;
4743 switch (opaque_key) {
4744 case RXD_OPAQUE_RING_STD:
4745 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4746 desc = &tpr->rx_std[dest_idx];
4747 map = &tpr->rx_std_buffers[dest_idx];
4748 skb_size = tp->rx_pkt_map_sz;
4751 case RXD_OPAQUE_RING_JUMBO:
4752 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4753 desc = &tpr->rx_jmb[dest_idx].std;
4754 map = &tpr->rx_jmb_buffers[dest_idx];
4755 skb_size = TG3_RX_JMB_MAP_SZ;
4762 /* Do not overwrite any of the map or rp information
4763 * until we are sure we can commit to a new buffer.
4765 * Callers depend upon this behavior and assume that
4766 * we leave everything unchanged if we fail.
4768 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4772 skb_reserve(skb, tp->rx_offset);
4774 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4775 PCI_DMA_FROMDEVICE);
4776 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4782 dma_unmap_addr_set(map, mapping, mapping);
4784 desc->addr_hi = ((u64)mapping >> 32);
4785 desc->addr_lo = ((u64)mapping & 0xffffffff);
4790 /* We only need to copy over the address because the other
4791 * members of the RX descriptor are invariant. See notes above
4792 * tg3_alloc_rx_skb for full details.
4794 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4795 struct tg3_rx_prodring_set *dpr,
4796 u32 opaque_key, int src_idx,
4797 u32 dest_idx_unmasked)
4799 struct tg3 *tp = tnapi->tp;
4800 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4801 struct ring_info *src_map, *dest_map;
4802 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4805 switch (opaque_key) {
4806 case RXD_OPAQUE_RING_STD:
4807 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4808 dest_desc = &dpr->rx_std[dest_idx];
4809 dest_map = &dpr->rx_std_buffers[dest_idx];
4810 src_desc = &spr->rx_std[src_idx];
4811 src_map = &spr->rx_std_buffers[src_idx];
4814 case RXD_OPAQUE_RING_JUMBO:
4815 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4816 dest_desc = &dpr->rx_jmb[dest_idx].std;
4817 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4818 src_desc = &spr->rx_jmb[src_idx].std;
4819 src_map = &spr->rx_jmb_buffers[src_idx];
4826 dest_map->skb = src_map->skb;
4827 dma_unmap_addr_set(dest_map, mapping,
4828 dma_unmap_addr(src_map, mapping));
4829 dest_desc->addr_hi = src_desc->addr_hi;
4830 dest_desc->addr_lo = src_desc->addr_lo;
4832 /* Ensure that the update to the skb happens after the physical
4833 * addresses have been transferred to the new BD location.
4837 src_map->skb = NULL;
4840 /* The RX ring scheme is composed of multiple rings which post fresh
4841 * buffers to the chip, and one special ring the chip uses to report
4842 * status back to the host.
4844 * The special ring reports the status of received packets to the
4845 * host. The chip does not write into the original descriptor the
4846 * RX buffer was obtained from. The chip simply takes the original
4847 * descriptor as provided by the host, updates the status and length
4848 * field, then writes this into the next status ring entry.
4850 * Each ring the host uses to post buffers to the chip is described
4851 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4852 * it is first placed into the on-chip ram. When the packet's length
4853 * is known, it walks down the TG3_BDINFO entries to select the ring.
4854 * Each TG3_BDINFO specifies a MAXLEN field; the first TG3_BDINFO
4855 * whose MAXLEN covers the new packet's length is chosen.
4857 * The "separate ring for rx status" scheme may sound queer, but it makes
4858 * sense from a cache coherency perspective. If only the host writes
4859 * to the buffer post rings, and only the chip writes to the rx status
4860 * rings, then cache lines never move beyond shared-modified state.
4861 * If both the host and chip were to write into the same ring, cache line
4862 * eviction could occur since both entities want it in an exclusive state.
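/* A concrete walk-through: the host posts buffer 5 on the standard
 * ring and bumps the standard producer mailbox; the chip DMAs a
 * frame into that buffer and writes a return-ring entry whose opaque
 * cookie encodes the STD ring id and index 5 along with the length
 * and error flags; tg3_rx() below reads that entry, passes the skb
 * up (or recycles it), and reposts index 5 with a fresh buffer.
 */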
4864 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4866 struct tg3 *tp = tnapi->tp;
4867 u32 work_mask, rx_std_posted = 0;
4868 u32 std_prod_idx, jmb_prod_idx;
4869 u32 sw_idx = tnapi->rx_rcb_ptr;
4872 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4874 hw_idx = *(tnapi->rx_rcb_prod_idx);
4876 * We need to order the read of hw_idx and the read of
4877 * the opaque cookie.
4882 std_prod_idx = tpr->rx_std_prod_idx;
4883 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4884 while (sw_idx != hw_idx && budget > 0) {
4885 struct ring_info *ri;
4886 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4888 struct sk_buff *skb;
4889 dma_addr_t dma_addr;
4890 u32 opaque_key, desc_idx, *post_ptr;
4892 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4893 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4894 if (opaque_key == RXD_OPAQUE_RING_STD) {
4895 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4896 dma_addr = dma_unmap_addr(ri, mapping);
4898 post_ptr = &std_prod_idx;
4900 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4901 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4902 dma_addr = dma_unmap_addr(ri, mapping);
4904 post_ptr = &jmb_prod_idx;
4906 goto next_pkt_nopost;
4908 work_mask |= opaque_key;
4910 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4911 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4913 tg3_recycle_rx(tnapi, tpr, opaque_key,
4914 desc_idx, *post_ptr);
4916 /* Other statistics are kept track of by the card. */
4921 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4924 if (len > TG3_RX_COPY_THRESH(tp)) {
4927 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4932 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4933 PCI_DMA_FROMDEVICE);
4935 /* Ensure that the update to the skb happens
4936 * after the usage of the old DMA mapping.
4944 struct sk_buff *copy_skb;
4946 tg3_recycle_rx(tnapi, tpr, opaque_key,
4947 desc_idx, *post_ptr);
4949 copy_skb = netdev_alloc_skb(tp->dev, len +
4951 if (copy_skb == NULL)
4952 goto drop_it_no_recycle;
4954 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4955 skb_put(copy_skb, len);
4956 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4957 skb_copy_from_linear_data(skb, copy_skb->data, len);
4958 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4960 /* We'll reuse the original ring buffer. */
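/* (Copybreak rationale: for frames at or below TG3_RX_COPY_THRESH it
 * is cheaper to sync and memcpy into a fresh skb, leaving the
 * original DMA mapping in place, than to unmap and allocate a
 * replacement buffer as the large-frame path above must do.)
 */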
4964 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4965 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4966 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4967 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4968 skb->ip_summed = CHECKSUM_UNNECESSARY;
4970 skb_checksum_none_assert(skb);
4972 skb->protocol = eth_type_trans(skb, tp->dev);
4974 if (len > (tp->dev->mtu + ETH_HLEN) &&
4975 skb->protocol != htons(ETH_P_8021Q)) {
4977 goto drop_it_no_recycle;
4980 if (desc->type_flags & RXD_FLAG_VLAN &&
4981 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4982 __vlan_hwaccel_put_tag(skb,
4983 desc->err_vlan & RXD_VLAN_MASK);
4985 napi_gro_receive(&tnapi->napi, skb);
4993 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4994 tpr->rx_std_prod_idx = std_prod_idx &
4995 tp->rx_std_ring_mask;
4996 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4997 tpr->rx_std_prod_idx);
4998 work_mask &= ~RXD_OPAQUE_RING_STD;
5003 sw_idx &= tp->rx_ret_ring_mask;
5005 /* Refresh hw_idx to see if there is new work */
5006 if (sw_idx == hw_idx) {
5007 hw_idx = *(tnapi->rx_rcb_prod_idx);
5012 /* ACK the status ring. */
5013 tnapi->rx_rcb_ptr = sw_idx;
5014 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5016 /* Refill RX ring(s). */
5017 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
5018 if (work_mask & RXD_OPAQUE_RING_STD) {
5019 tpr->rx_std_prod_idx = std_prod_idx &
5020 tp->rx_std_ring_mask;
5021 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5022 tpr->rx_std_prod_idx);
5024 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5025 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5026 tp->rx_jmb_ring_mask;
5027 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5028 tpr->rx_jmb_prod_idx);
5031 } else if (work_mask) {
5032 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5033 * updated before the producer indices can be updated.
5037 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5038 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5040 if (tnapi != &tp->napi[1])
5041 napi_schedule(&tp->napi[1].napi);
5047 static void tg3_poll_link(struct tg3 *tp)
5049 /* handle link change and other phy events */
5050 if (!(tp->tg3_flags &
5051 (TG3_FLAG_USE_LINKCHG_REG |
5052 TG3_FLAG_POLL_SERDES))) {
5053 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5055 if (sblk->status & SD_STATUS_LINK_CHG) {
5056 sblk->status = SD_STATUS_UPDATED |
5057 (sblk->status & ~SD_STATUS_LINK_CHG);
5058 spin_lock(&tp->lock);
5059 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
5061 (MAC_STATUS_SYNC_CHANGED |
5062 MAC_STATUS_CFG_CHANGED |
5063 MAC_STATUS_MI_COMPLETION |
5064 MAC_STATUS_LNKSTATE_CHANGED));
5067 tg3_setup_phy(tp, 0);
5068 spin_unlock(&tp->lock);
5073 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5074 struct tg3_rx_prodring_set *dpr,
5075 struct tg3_rx_prodring_set *spr)
5077 u32 si, di, cpycnt, src_prod_idx;
5081 src_prod_idx = spr->rx_std_prod_idx;
5083 /* Make sure updates to the rx_std_buffers[] entries and the
5084 * standard producer index are seen in the correct order.
5088 if (spr->rx_std_cons_idx == src_prod_idx)
5091 if (spr->rx_std_cons_idx < src_prod_idx)
5092 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5094 cpycnt = tp->rx_std_ring_mask + 1 -
5095 spr->rx_std_cons_idx;
5097 cpycnt = min(cpycnt,
5098 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5100 si = spr->rx_std_cons_idx;
5101 di = dpr->rx_std_prod_idx;
5103 for (i = di; i < di + cpycnt; i++) {
5104 if (dpr->rx_std_buffers[i].skb) {
5114 /* Ensure that updates to the rx_std_buffers ring and the
5115 * shadowed hardware producer ring from tg3_recycle_rx() are
5116 * ordered correctly WRT the skb check above.
5120 memcpy(&dpr->rx_std_buffers[di],
5121 &spr->rx_std_buffers[si],
5122 cpycnt * sizeof(struct ring_info));
5124 for (i = 0; i < cpycnt; i++, di++, si++) {
5125 struct tg3_rx_buffer_desc *sbd, *dbd;
5126 sbd = &spr->rx_std[si];
5127 dbd = &dpr->rx_std[di];
5128 dbd->addr_hi = sbd->addr_hi;
5129 dbd->addr_lo = sbd->addr_lo;
5132 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5133 tp->rx_std_ring_mask;
5134 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5135 tp->rx_std_ring_mask;
5139 src_prod_idx = spr->rx_jmb_prod_idx;
5141 /* Make sure updates to the rx_jmb_buffers[] entries and
5142 * the jumbo producer index are seen in the correct order.
5146 if (spr->rx_jmb_cons_idx == src_prod_idx)
5149 if (spr->rx_jmb_cons_idx < src_prod_idx)
5150 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5152 cpycnt = tp->rx_jmb_ring_mask + 1 -
5153 spr->rx_jmb_cons_idx;
5155 cpycnt = min(cpycnt,
5156 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5158 si = spr->rx_jmb_cons_idx;
5159 di = dpr->rx_jmb_prod_idx;
5161 for (i = di; i < di + cpycnt; i++) {
5162 if (dpr->rx_jmb_buffers[i].skb) {
5172 /* Ensure that updates to the rx_jmb_buffers ring and the
5173 * shadowed hardware producer ring from tg3_recycle_rx() are
5174 * ordered correctly WRT the skb check above.
5178 memcpy(&dpr->rx_jmb_buffers[di],
5179 &spr->rx_jmb_buffers[si],
5180 cpycnt * sizeof(struct ring_info));
5182 for (i = 0; i < cpycnt; i++, di++, si++) {
5183 struct tg3_rx_buffer_desc *sbd, *dbd;
5184 sbd = &spr->rx_jmb[si].std;
5185 dbd = &dpr->rx_jmb[di].std;
5186 dbd->addr_hi = sbd->addr_hi;
5187 dbd->addr_lo = sbd->addr_lo;
5190 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5191 tp->rx_jmb_ring_mask;
5192 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5193 tp->rx_jmb_ring_mask;
5199 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5201 struct tg3 *tp = tnapi->tp;
5203 /* run TX completion thread */
5204 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5206 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5210 /* run RX thread, within the bounds set by NAPI.
5211 * All RX "locking" is done by ensuring outside
5212 * code synchronizes with tg3->napi.poll()
5214 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5215 work_done += tg3_rx(tnapi, budget - work_done);
5217 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
5218 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5220 u32 std_prod_idx = dpr->rx_std_prod_idx;
5221 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5223 for (i = 1; i < tp->irq_cnt; i++)
5224 err |= tg3_rx_prodring_xfer(tp, dpr,
5225 &tp->napi[i].prodring);
5229 if (std_prod_idx != dpr->rx_std_prod_idx)
5230 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5231 dpr->rx_std_prod_idx);
5233 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5234 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5235 dpr->rx_jmb_prod_idx);
5240 tw32_f(HOSTCC_MODE, tp->coal_now);
5246 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5248 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5249 struct tg3 *tp = tnapi->tp;
5251 struct tg3_hw_status *sblk = tnapi->hw_status;
5254 work_done = tg3_poll_work(tnapi, work_done, budget);
5256 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5259 if (unlikely(work_done >= budget))
5262 /* tnapi->last_tag is written to the interrupt mailbox below
5263 * to tell the hw how much work has been processed,
5264 * so we must read it before checking for more work.
5266 tnapi->last_tag = sblk->status_tag;
5267 tnapi->last_irq_tag = tnapi->last_tag;
5270 /* check for RX/TX work to do */
5271 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5272 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5273 napi_complete(napi);
5274 /* Reenable interrupts. */
5275 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5284 /* work_done is guaranteed to be less than budget. */
5285 napi_complete(napi);
5286 schedule_work(&tp->reset_task);
5290 static void tg3_process_error(struct tg3 *tp)
5293 bool real_error = false;
5295 if (tp->tg3_flags & TG3_FLAG_ERROR_PROCESSED)
5298 /* Check Flow Attention register */
5299 val = tr32(HOSTCC_FLOW_ATTN);
5300 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5301 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5305 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5306 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5310 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5311 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5320 tp->tg3_flags |= TG3_FLAG_ERROR_PROCESSED;
5321 schedule_work(&tp->reset_task);
5324 static int tg3_poll(struct napi_struct *napi, int budget)
5326 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5327 struct tg3 *tp = tnapi->tp;
5329 struct tg3_hw_status *sblk = tnapi->hw_status;
5332 if (sblk->status & SD_STATUS_ERROR)
5333 tg3_process_error(tp);
5337 work_done = tg3_poll_work(tnapi, work_done, budget);
5339 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5342 if (unlikely(work_done >= budget))
5345 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
5346 /* tnapi->last_tag is used in tg3_int_reenable() below
5347 * to tell the hw how much work has been processed,
5348 * so we must read it before checking for more work.
5350 tnapi->last_tag = sblk->status_tag;
5351 tnapi->last_irq_tag = tnapi->last_tag;
5354 sblk->status &= ~SD_STATUS_UPDATED;
5356 if (likely(!tg3_has_work(tnapi))) {
5357 napi_complete(napi);
5358 tg3_int_reenable(tnapi);
5366 /* work_done is guaranteed to be less than budget. */
5367 napi_complete(napi);
5368 schedule_work(&tp->reset_task);
5372 static void tg3_napi_disable(struct tg3 *tp)
5376 for (i = tp->irq_cnt - 1; i >= 0; i--)
5377 napi_disable(&tp->napi[i].napi);
5380 static void tg3_napi_enable(struct tg3 *tp)
5384 for (i = 0; i < tp->irq_cnt; i++)
5385 napi_enable(&tp->napi[i].napi);
5388 static void tg3_napi_init(struct tg3 *tp)
5392 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5393 for (i = 1; i < tp->irq_cnt; i++)
5394 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5397 static void tg3_napi_fini(struct tg3 *tp)
5401 for (i = 0; i < tp->irq_cnt; i++)
5402 netif_napi_del(&tp->napi[i].napi);
5405 static inline void tg3_netif_stop(struct tg3 *tp)
5407 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5408 tg3_napi_disable(tp);
5409 netif_tx_disable(tp->dev);
5412 static inline void tg3_netif_start(struct tg3 *tp)
5414 /* NOTE: unconditional netif_tx_wake_all_queues is only
5415 * appropriate so long as all callers are assured to
5416 * have free tx slots (such as after tg3_init_hw)
5418 netif_tx_wake_all_queues(tp->dev);
5420 tg3_napi_enable(tp);
5421 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5422 tg3_enable_ints(tp);
5425 static void tg3_irq_quiesce(struct tg3 *tp)
5429 BUG_ON(tp->irq_sync);
5434 for (i = 0; i < tp->irq_cnt; i++)
5435 synchronize_irq(tp->napi[i].irq_vec);
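/* Quiescing is two-phase: tp->irq_sync is raised first so that
 * tg3_irq_sync() makes the interrupt handlers bail out without
 * scheduling NAPI, then synchronize_irq() waits for any handler
 * already running on another CPU to drain.
 */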
5438 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5439 * If irq_sync is non-zero, then the IRQ handlers must be synchronized
5440 * as well. Most of the time, this is not necessary except when
5441 * shutting down the device.
5443 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5445 spin_lock_bh(&tp->lock);
5447 tg3_irq_quiesce(tp);
5450 static inline void tg3_full_unlock(struct tg3 *tp)
5452 spin_unlock_bh(&tp->lock);
5455 /* One-shot MSI handler - Chip automatically disables interrupt
5456 * after sending MSI so driver doesn't have to do it.
5458 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5460 struct tg3_napi *tnapi = dev_id;
5461 struct tg3 *tp = tnapi->tp;
5463 prefetch(tnapi->hw_status);
5465 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5467 if (likely(!tg3_irq_sync(tp)))
5468 napi_schedule(&tnapi->napi);
5473 /* MSI ISR - No need to check for interrupt sharing and no need to
5474 * flush status block and interrupt mailbox. PCI ordering rules
5475 * guarantee that MSI will arrive after the status block.
5477 static irqreturn_t tg3_msi(int irq, void *dev_id)
5479 struct tg3_napi *tnapi = dev_id;
5480 struct tg3 *tp = tnapi->tp;
5482 prefetch(tnapi->hw_status);
5484 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5486 * Writing any value to intr-mbox-0 clears PCI INTA# and
5487 * chip-internal interrupt pending events.
5488 * Writing non-zero to intr-mbox-0 additionally tells the
5489 * NIC to stop sending us irqs, engaging "in-intr-handler"
5492 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5493 if (likely(!tg3_irq_sync(tp)))
5494 napi_schedule(&tnapi->napi);
5496 return IRQ_RETVAL(1);
5499 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5501 struct tg3_napi *tnapi = dev_id;
5502 struct tg3 *tp = tnapi->tp;
5503 struct tg3_hw_status *sblk = tnapi->hw_status;
5504 unsigned int handled = 1;
5506 /* In INTx mode, it is possible for the interrupt to arrive at
5507 * the CPU before the status block posted prior to the interrupt.
5508 * Reading the PCI State register will confirm whether the
5509 * interrupt is ours and will flush the status block.
5511 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5512 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5513 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5520 * Writing any value to intr-mbox-0 clears PCI INTA# and
5521 * chip-internal interrupt pending events.
5522 * Writing non-zero to intr-mbox-0 additionally tells the
5523 * NIC to stop sending us irqs, engaging "in-intr-handler"
5526 * Flush the mailbox to de-assert the IRQ immediately to prevent
5527 * spurious interrupts. The flush impacts performance but
5528 * excessive spurious interrupts can be worse in some cases.
5530 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5531 if (tg3_irq_sync(tp))
5533 sblk->status &= ~SD_STATUS_UPDATED;
5534 if (likely(tg3_has_work(tnapi))) {
5535 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5536 napi_schedule(&tnapi->napi);
5538 /* No work, shared interrupt perhaps? re-enable
5539 * interrupts, and flush that PCI write
5541 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5545 return IRQ_RETVAL(handled);
5548 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5550 struct tg3_napi *tnapi = dev_id;
5551 struct tg3 *tp = tnapi->tp;
5552 struct tg3_hw_status *sblk = tnapi->hw_status;
5553 unsigned int handled = 1;
5555 /* In INTx mode, it is possible for the interrupt to arrive at
5556 * the CPU before the status block posted prior to the interrupt.
5557 * Reading the PCI State register will confirm whether the
5558 * interrupt is ours and will flush the status block.
5560 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5561 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5562 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5569 * writing any value to intr-mbox-0 clears PCI INTA# and
5570 * chip-internal interrupt pending events.
5571 * writing non-zero to intr-mbox-0 additionally tells the
5572 * NIC to stop sending us irqs, engaging "in-intr-handler"
5575 * Flush the mailbox to de-assert the IRQ immediately to prevent
5576 * spurious interrupts. The flush impacts performance but
5577 * excessive spurious interrupts can be worse in some cases.
5579 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5582 * In a shared interrupt configuration, sometimes other devices'
5583 * interrupts will scream. We record the current status tag here
5584 * so that the above check can report that the screaming interrupts
5585 * are unhandled. Eventually they will be silenced.
5587 tnapi->last_irq_tag = sblk->status_tag;
5589 if (tg3_irq_sync(tp))
5592 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5594 napi_schedule(&tnapi->napi);
5597 return IRQ_RETVAL(handled);
5600 /* ISR for interrupt test */
5601 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5603 struct tg3_napi *tnapi = dev_id;
5604 struct tg3 *tp = tnapi->tp;
5605 struct tg3_hw_status *sblk = tnapi->hw_status;
5607 if ((sblk->status & SD_STATUS_UPDATED) ||
5608 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5609 tg3_disable_ints(tp);
5610 return IRQ_RETVAL(1);
5612 return IRQ_RETVAL(0);
5615 static int tg3_init_hw(struct tg3 *, int);
5616 static int tg3_halt(struct tg3 *, int, int);
5618 /* Restart hardware after configuration changes, self-test, etc.
5619 * Invoked with tp->lock held.
5621 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5622 __releases(tp->lock)
5623 __acquires(tp->lock)
5627 err = tg3_init_hw(tp, reset_phy);
5630 "Failed to re-initialize device, aborting\n");
5631 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5632 tg3_full_unlock(tp);
5633 del_timer_sync(&tp->timer);
5635 tg3_napi_enable(tp);
5637 tg3_full_lock(tp, 0);
5642 #ifdef CONFIG_NET_POLL_CONTROLLER
5643 static void tg3_poll_controller(struct net_device *dev)
5646 struct tg3 *tp = netdev_priv(dev);
5648 for (i = 0; i < tp->irq_cnt; i++)
5649 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5653 static void tg3_reset_task(struct work_struct *work)
5655 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5657 unsigned int restart_timer;
5659 tg3_full_lock(tp, 0);
5661 if (!netif_running(tp->dev)) {
5662 tg3_full_unlock(tp);
5666 tg3_full_unlock(tp);
5672 tg3_full_lock(tp, 1);
5674 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5675 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5677 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5678 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5679 tp->write32_rx_mbox = tg3_write_flush_reg32;
5680 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5681 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5684 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5685 err = tg3_init_hw(tp, 1);
5689 tg3_netif_start(tp);
5692 mod_timer(&tp->timer, jiffies + 1);
5695 tg3_full_unlock(tp);
5701 static void tg3_tx_timeout(struct net_device *dev)
5703 struct tg3 *tp = netdev_priv(dev);
5705 if (netif_msg_tx_err(tp)) {
5706 netdev_err(dev, "transmit timed out, resetting\n");
5710 schedule_work(&tp->reset_task);
5713 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5714 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5716 u32 base = (u32) mapping & 0xffffffff;
5718 return (base > 0xffffdcc0) && (base + len + 8 < base);
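/* Illustration with hypothetical values: base = 0xffffe000 and
 * len = 0x2000 give base + len + 8 = 0x100000008, which truncates
 * to 8 < base, so the buffer straddles a 4GB boundary and the test
 * fires. The base > 0xffffdcc0 guard limits the check to mappings
 * ending near the top of a 4GB window.
 */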
5721 /* Test for DMA addresses > 40-bit */
5722 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5725 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5726 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5727 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5734 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5736 /* Work around 4GB and 40-bit hardware DMA bugs. */
5737 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5738 struct sk_buff *skb, u32 last_plus_one,
5739 u32 *start, u32 base_flags, u32 mss)
5741 struct tg3 *tp = tnapi->tp;
5742 struct sk_buff *new_skb;
5743 dma_addr_t new_addr = 0;
5747 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5748 new_skb = skb_copy(skb, GFP_ATOMIC);
5750 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5752 new_skb = skb_copy_expand(skb,
5753 skb_headroom(skb) + more_headroom,
5754 skb_tailroom(skb), GFP_ATOMIC);
5760 /* New SKB is guaranteed to be linear. */
5762 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5764 /* Make sure the mapping succeeded */
5765 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5767 dev_kfree_skb(new_skb);
5770 /* Make sure new skb does not cross any 4G boundaries.
5771 * Drop the packet if it does.
5773 } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5774 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5775 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5778 dev_kfree_skb(new_skb);
5781 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5782 base_flags, 1 | (mss << 1));
5783 *start = NEXT_TX(entry);
5787 /* Now clean up the sw ring entries. */
5789 while (entry != last_plus_one) {
5793 len = skb_headlen(skb);
5795 len = skb_shinfo(skb)->frags[i-1].size;
5797 pci_unmap_single(tp->pdev,
5798 dma_unmap_addr(&tnapi->tx_buffers[entry],
5800 len, PCI_DMA_TODEVICE);
5802 tnapi->tx_buffers[entry].skb = new_skb;
5803 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5806 tnapi->tx_buffers[entry].skb = NULL;
5808 entry = NEXT_TX(entry);
5817 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5818 dma_addr_t mapping, int len, u32 flags,
5821 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5822 int is_end = (mss_and_is_end & 0x1);
5823 u32 mss = (mss_and_is_end >> 1);
5827 flags |= TXD_FLAG_END;
5828 if (flags & TXD_FLAG_VLAN) {
5829 vlan_tag = flags >> 16;
5832 vlan_tag |= (mss << TXD_MSS_SHIFT);
5834 txd->addr_hi = ((u64) mapping >> 32);
5835 txd->addr_lo = ((u64) mapping & 0xffffffff);
5836 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5837 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
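/* Callers pack the final argument as (is_end | (mss << 1)); for
 * example, tg3_start_xmit() passes (i == last) | (mss << 1), so bit 0
 * marks the last fragment (TXD_FLAG_END) and the remaining bits carry
 * the TSO MSS.
 */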
5840 /* hard_start_xmit for devices that don't have any bugs and
5841 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5843 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5844 struct net_device *dev)
5846 struct tg3 *tp = netdev_priv(dev);
5847 u32 len, entry, base_flags, mss;
5849 struct tg3_napi *tnapi;
5850 struct netdev_queue *txq;
5851 unsigned int i, last;
5853 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5854 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5855 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5858 /* We are running in BH disabled context with netif_tx_lock
5859 * and TX reclaim runs via tp->napi.poll inside of a software
5860 * interrupt. Furthermore, IRQ processing runs lockless so we have
5861 * no IRQ context deadlocks to worry about either. Rejoice!
5863 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5864 if (!netif_tx_queue_stopped(txq)) {
5865 netif_tx_stop_queue(txq);
5867 /* This is a hard error, log it. */
5869 "BUG! Tx Ring full when queue awake!\n");
5871 return NETDEV_TX_BUSY;
5874 entry = tnapi->tx_prod;
5876 mss = skb_shinfo(skb)->gso_size;
5878 int tcp_opt_len, ip_tcp_len;
5881 if (skb_header_cloned(skb) &&
5882 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5887 if (skb_is_gso_v6(skb)) {
5888 hdrlen = skb_headlen(skb) - ETH_HLEN;
5890 struct iphdr *iph = ip_hdr(skb);
5892 tcp_opt_len = tcp_optlen(skb);
5893 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5896 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5897 hdrlen = ip_tcp_len + tcp_opt_len;
5900 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5901 mss |= (hdrlen & 0xc) << 12;
5903 base_flags |= 0x00000010;
5904 base_flags |= (hdrlen & 0x3e0) << 5;
5908 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5909 TXD_FLAG_CPU_POST_DMA);
5911 tcp_hdr(skb)->check = 0;
5913 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5914 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5917 if (vlan_tx_tag_present(skb))
5918 base_flags |= (TXD_FLAG_VLAN |
5919 (vlan_tx_tag_get(skb) << 16));
5921 len = skb_headlen(skb);
5923 /* Queue skb data, a.k.a. the main skb fragment. */
5924 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5925 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5930 tnapi->tx_buffers[entry].skb = skb;
5931 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5933 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5934 !mss && skb->len > VLAN_ETH_FRAME_LEN)
5935 base_flags |= TXD_FLAG_JMB_PKT;
5937 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5938 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5940 entry = NEXT_TX(entry);
5942 /* Now loop through additional data fragments, and queue them. */
5943 if (skb_shinfo(skb)->nr_frags > 0) {
5944 last = skb_shinfo(skb)->nr_frags - 1;
5945 for (i = 0; i <= last; i++) {
5946 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5949 mapping = pci_map_page(tp->pdev,
5952 len, PCI_DMA_TODEVICE);
5953 if (pci_dma_mapping_error(tp->pdev, mapping))
5956 tnapi->tx_buffers[entry].skb = NULL;
5957 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5960 tg3_set_txd(tnapi, entry, mapping, len,
5961 base_flags, (i == last) | (mss << 1));
5963 entry = NEXT_TX(entry);
5967 /* Packets are ready, update the Tx producer idx locally and on the card. */
5968 tw32_tx_mbox(tnapi->prodmbox, entry);
5970 tnapi->tx_prod = entry;
5971 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5972 netif_tx_stop_queue(txq);
5974 /* netif_tx_stop_queue() must be done before checking the
5975 * tx index in tg3_tx_avail() below, because in
5976 * tg3_tx(), we update tx index before checking for
5977 * netif_tx_queue_stopped().
5980 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5981 netif_tx_wake_queue(txq);
5987 return NETDEV_TX_OK;
5991 entry = tnapi->tx_prod;
5992 tnapi->tx_buffers[entry].skb = NULL;
5993 pci_unmap_single(tp->pdev,
5994 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5997 for (i = 0; i <= last; i++) {
5998 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5999 entry = NEXT_TX(entry);
6001 pci_unmap_page(tp->pdev,
6002 dma_unmap_addr(&tnapi->tx_buffers[entry],
6004 frag->size, PCI_DMA_TODEVICE);
6008 return NETDEV_TX_OK;
6011 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
6012 struct net_device *);
6014 /* Use GSO to work around a rare TSO bug that may be triggered when the
6015 * TSO header is greater than 80 bytes.
6017 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6019 struct sk_buff *segs, *nskb;
6020 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6022 /* Estimate the number of fragments in the worst case */
6023 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6024 netif_stop_queue(tp->dev);
6026 /* netif_tx_stop_queue() must be done before checking the
6027 * tx index in tg3_tx_avail() below, because in
6028 * tg3_tx(), we update tx index before checking for
6029 * netif_tx_queue_stopped().
6032 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6033 return NETDEV_TX_BUSY;
6035 netif_wake_queue(tp->dev);
6038 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6040 goto tg3_tso_bug_end;
6046 tg3_start_xmit_dma_bug(nskb, tp->dev);
6052 return NETDEV_TX_OK;
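/* On the estimate above: gso_segs * 3 is a rough worst case of about
 * three tx descriptors per resulting segment (the linear header area
 * plus a couple of page fragments); if even that many slots are not
 * free, the queue is stopped rather than risk overrunning the tx
 * ring mid-burst.
 */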
6055 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6056 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
6058 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
6059 struct net_device *dev)
6061 struct tg3 *tp = netdev_priv(dev);
6062 u32 len, entry, base_flags, mss;
6063 int would_hit_hwbug;
6065 struct tg3_napi *tnapi;
6066 struct netdev_queue *txq;
6067 unsigned int i, last;
6069 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6070 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6071 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
6074 /* We are running in BH disabled context with netif_tx_lock
6075 * and TX reclaim runs via tp->napi.poll inside of a software
6076 * interrupt. Furthermore, IRQ processing runs lockless so we have
6077 * no IRQ context deadlocks to worry about either. Rejoice!
6079 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
6080 if (!netif_tx_queue_stopped(txq)) {
6081 netif_tx_stop_queue(txq);
6083 /* This is a hard error, log it. */
6085 "BUG! Tx Ring full when queue awake!\n");
6087 return NETDEV_TX_BUSY;
6090 entry = tnapi->tx_prod;
6092 if (skb->ip_summed == CHECKSUM_PARTIAL)
6093 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6095 mss = skb_shinfo(skb)->gso_size;
6098 u32 tcp_opt_len, hdr_len;
6100 if (skb_header_cloned(skb) &&
6101 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6107 tcp_opt_len = tcp_optlen(skb);
6109 if (skb_is_gso_v6(skb)) {
6110 hdr_len = skb_headlen(skb) - ETH_HLEN;
6114 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6115 hdr_len = ip_tcp_len + tcp_opt_len;
6118 iph->tot_len = htons(mss + hdr_len);
6121 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6122 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
6123 return tg3_tso_bug(tp, skb);
6125 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6126 TXD_FLAG_CPU_POST_DMA);
6128 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
6129 tcp_hdr(skb)->check = 0;
6130 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6132 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6137 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
6138 mss |= (hdr_len & 0xc) << 12;
6140 base_flags |= 0x00000010;
6141 base_flags |= (hdr_len & 0x3e0) << 5;
6142 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
6143 mss |= hdr_len << 9;
6144 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
6145 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6146 if (tcp_opt_len || iph->ihl > 5) {
6149 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6150 mss |= (tsflags << 11);
6153 if (tcp_opt_len || iph->ihl > 5) {
6156 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6157 base_flags |= tsflags << 12;
6162 if (vlan_tx_tag_present(skb))
6163 base_flags |= (TXD_FLAG_VLAN |
6164 (vlan_tx_tag_get(skb) << 16));
6166 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
6167 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6168 base_flags |= TXD_FLAG_JMB_PKT;
6170 len = skb_headlen(skb);
6172 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6173 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6178 tnapi->tx_buffers[entry].skb = skb;
6179 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6181 would_hit_hwbug = 0;
6183 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
6184 would_hit_hwbug = 1;
6186 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
6187 tg3_4g_overflow_test(mapping, len))
6188 would_hit_hwbug = 1;
6190 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
6191 tg3_40bit_overflow_test(tp, mapping, len))
6192 would_hit_hwbug = 1;
6194 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
6195 would_hit_hwbug = 1;
6197 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6198 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6200 entry = NEXT_TX(entry);
6202 /* Now loop through additional data fragments, and queue them. */
6203 if (skb_shinfo(skb)->nr_frags > 0) {
6204 last = skb_shinfo(skb)->nr_frags - 1;
6205 for (i = 0; i <= last; i++) {
6206 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6209 mapping = pci_map_page(tp->pdev,
6212 len, PCI_DMA_TODEVICE);
6214 tnapi->tx_buffers[entry].skb = NULL;
6215 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6217 if (pci_dma_mapping_error(tp->pdev, mapping))
6220 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
6222 would_hit_hwbug = 1;
6224 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
6225 tg3_4g_overflow_test(mapping, len))
6226 would_hit_hwbug = 1;
6228 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
6229 tg3_40bit_overflow_test(tp, mapping, len))
6230 would_hit_hwbug = 1;
6232 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6233 tg3_set_txd(tnapi, entry, mapping, len,
6234 base_flags, (i == last)|(mss << 1));
6236 tg3_set_txd(tnapi, entry, mapping, len,
6237 base_flags, (i == last));
6239 entry = NEXT_TX(entry);
6243 if (would_hit_hwbug) {
6244 u32 last_plus_one = entry;
6247 start = entry - 1 - skb_shinfo(skb)->nr_frags;
6248 start &= (TG3_TX_RING_SIZE - 1);
6250 /* If the workaround fails due to memory/mapping
6251 * failure, silently drop this packet.
6253 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
6254 &start, base_flags, mss))
6260 /* Packets are ready, update the Tx producer idx locally and on the card. */
6261 tw32_tx_mbox(tnapi->prodmbox, entry);
6263 tnapi->tx_prod = entry;
6264 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6265 netif_tx_stop_queue(txq);
6267 /* netif_tx_stop_queue() must be done before checking the
6268 * tx index in tg3_tx_avail() below, because in
6269 * tg3_tx(), we update tx index before checking for
6270 * netif_tx_queue_stopped().
6273 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6274 netif_tx_wake_queue(txq);
6280 return NETDEV_TX_OK;
6284 entry = tnapi->tx_prod;
6285 tnapi->tx_buffers[entry].skb = NULL;
6286 pci_unmap_single(tp->pdev,
6287 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
6290 for (i = 0; i <= last; i++) {
6291 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6292 entry = NEXT_TX(entry);
6294 pci_unmap_page(tp->pdev,
6295 dma_unmap_addr(&tnapi->tx_buffers[entry],
6297 frag->size, PCI_DMA_TODEVICE);
6301 return NETDEV_TX_OK;
6304 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6306 struct tg3 *tp = netdev_priv(dev);
6308 if (dev->mtu > ETH_DATA_LEN && (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6309 features &= ~NETIF_F_ALL_TSO;
6314 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6319 if (new_mtu > ETH_DATA_LEN) {
6320 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6321 netdev_update_features(dev);
6322 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
6324 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
6327 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6328 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
6329 netdev_update_features(dev);
6331 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
6335 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6337 struct tg3 *tp = netdev_priv(dev);
6340 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6343 if (!netif_running(dev)) {
6344 /* We'll just catch it later when the
6347 tg3_set_mtu(dev, tp, new_mtu);
6355 tg3_full_lock(tp, 1);
6357 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6359 tg3_set_mtu(dev, tp, new_mtu);
6361 err = tg3_restart_hw(tp, 0);
6364 tg3_netif_start(tp);
6366 tg3_full_unlock(tp);
6374 static void tg3_rx_prodring_free(struct tg3 *tp,
6375 struct tg3_rx_prodring_set *tpr)
6379 if (tpr != &tp->napi[0].prodring) {
6380 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6381 i = (i + 1) & tp->rx_std_ring_mask)
6382 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6385 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6386 for (i = tpr->rx_jmb_cons_idx;
6387 i != tpr->rx_jmb_prod_idx;
6388 i = (i + 1) & tp->rx_jmb_ring_mask) {
6389 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6397 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6398 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6401 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
6402 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
6403 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6404 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6409 /* Initialize rx rings for packet processing.
6411 * The chip has been shut down and the driver detached from
6412 * the networking, so no interrupts or new tx packets will
6413 * end up in the driver. tp->{tx,}lock are held and thus
6416 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6417 struct tg3_rx_prodring_set *tpr)
6419 u32 i, rx_pkt_dma_sz;
6421 tpr->rx_std_cons_idx = 0;
6422 tpr->rx_std_prod_idx = 0;
6423 tpr->rx_jmb_cons_idx = 0;
6424 tpr->rx_jmb_prod_idx = 0;
6426 if (tpr != &tp->napi[0].prodring) {
6427 memset(&tpr->rx_std_buffers[0], 0,
6428 TG3_RX_STD_BUFF_RING_SIZE(tp));
6429 if (tpr->rx_jmb_buffers)
6430 memset(&tpr->rx_jmb_buffers[0], 0,
6431 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6435 /* Zero out all descriptors. */
6436 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6438 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6439 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
6440 tp->dev->mtu > ETH_DATA_LEN)
6441 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6442 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6444 /* Initialize invariants of the rings; we only set this
6445 * stuff once. This works because the card does not
6446 * write into the rx buffer posting rings.
6448 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6449 struct tg3_rx_buffer_desc *rxd;
6451 rxd = &tpr->rx_std[i];
6452 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6453 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6454 rxd->opaque = (RXD_OPAQUE_RING_STD |
6455 (i << RXD_OPAQUE_INDEX_SHIFT));
6458 /* Now allocate fresh SKBs for each rx ring. */
6459 for (i = 0; i < tp->rx_pending; i++) {
6460 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6461 netdev_warn(tp->dev,
6462 "Using a smaller RX standard ring. Only "
6463 "%d out of %d buffers were allocated "
6464 "successfully\n", i, tp->rx_pending);
6472 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ||
6473 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6476 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6478 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
6481 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6482 struct tg3_rx_buffer_desc *rxd;
6484 rxd = &tpr->rx_jmb[i].std;
6485 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6486 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6488 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6489 (i << RXD_OPAQUE_INDEX_SHIFT));
6492 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6493 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6494 netdev_warn(tp->dev,
6495 "Using a smaller RX jumbo ring. Only %d "
6496 "out of %d buffers were allocated "
6497 "successfully\n", i, tp->rx_jumbo_pending);
6500 tp->rx_jumbo_pending = i;
6509 tg3_rx_prodring_free(tp, tpr);
6513 static void tg3_rx_prodring_fini(struct tg3 *tp,
6514 struct tg3_rx_prodring_set *tpr)
6516 kfree(tpr->rx_std_buffers);
6517 tpr->rx_std_buffers = NULL;
6518 kfree(tpr->rx_jmb_buffers);
6519 tpr->rx_jmb_buffers = NULL;
6521 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6522 tpr->rx_std, tpr->rx_std_mapping);
6526 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6527 tpr->rx_jmb, tpr->rx_jmb_mapping);
6532 static int tg3_rx_prodring_init(struct tg3 *tp,
6533 struct tg3_rx_prodring_set *tpr)
6535 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6537 if (!tpr->rx_std_buffers)
6540 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6541 TG3_RX_STD_RING_BYTES(tp),
6542 &tpr->rx_std_mapping,
6547 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
6548 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
6549 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6551 if (!tpr->rx_jmb_buffers)
6554 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6555 TG3_RX_JMB_RING_BYTES(tp),
6556 &tpr->rx_jmb_mapping,
6565 tg3_rx_prodring_fini(tp, tpr);
6569 /* Free up pending packets in all rx/tx rings.
6571 * The chip has been shut down and the driver detached from
6572 * the networking, so no interrupts or new tx packets will
6573 * end up in the driver. tp->{tx,}lock is not held and we are not
6574 * in an interrupt context and thus may sleep.
6576 static void tg3_free_rings(struct tg3 *tp)
6580 for (j = 0; j < tp->irq_cnt; j++) {
6581 struct tg3_napi *tnapi = &tp->napi[j];
6583 tg3_rx_prodring_free(tp, &tnapi->prodring);
6585 if (!tnapi->tx_buffers)
6588 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6589 struct ring_info *txp;
6590 struct sk_buff *skb;
6593 txp = &tnapi->tx_buffers[i];
6601 pci_unmap_single(tp->pdev,
6602 dma_unmap_addr(txp, mapping),
6609 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6610 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6611 pci_unmap_page(tp->pdev,
6612 dma_unmap_addr(txp, mapping),
6613 skb_shinfo(skb)->frags[k].size,
6618 dev_kfree_skb_any(skb);
6623 /* Initialize tx/rx rings for packet processing.
6625 * The chip has been shut down and the driver detached from
6626 * the networking, so no interrupts or new tx packets will
6627 * end up in the driver. tp->{tx,}lock are held and thus
6630 static int tg3_init_rings(struct tg3 *tp)
6634 /* Free up all the SKBs. */
6637 for (i = 0; i < tp->irq_cnt; i++) {
6638 struct tg3_napi *tnapi = &tp->napi[i];
6640 tnapi->last_tag = 0;
6641 tnapi->last_irq_tag = 0;
6642 tnapi->hw_status->status = 0;
6643 tnapi->hw_status->status_tag = 0;
6644 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6649 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6651 tnapi->rx_rcb_ptr = 0;
6653 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6655 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6665 * Must not be invoked with interrupt sources disabled and
6666 * the hardware shut down.
6668 static void tg3_free_consistent(struct tg3 *tp)
6672 for (i = 0; i < tp->irq_cnt; i++) {
6673 struct tg3_napi *tnapi = &tp->napi[i];
6675 if (tnapi->tx_ring) {
6676 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6677 tnapi->tx_ring, tnapi->tx_desc_mapping);
6678 tnapi->tx_ring = NULL;
6681 kfree(tnapi->tx_buffers);
6682 tnapi->tx_buffers = NULL;
6684 if (tnapi->rx_rcb) {
6685 dma_free_coherent(&tp->pdev->dev,
6686 TG3_RX_RCB_RING_BYTES(tp),
6688 tnapi->rx_rcb_mapping);
6689 tnapi->rx_rcb = NULL;
6692 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6694 if (tnapi->hw_status) {
6695 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6697 tnapi->status_mapping);
6698 tnapi->hw_status = NULL;
6703 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6704 tp->hw_stats, tp->stats_mapping);
6705 tp->hw_stats = NULL;
6710 * Must not be invoked with interrupt sources disabled and
6711 * the hardware shut down. Can sleep.
6713 static int tg3_alloc_consistent(struct tg3 *tp)
6717 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6718 sizeof(struct tg3_hw_stats),
6724 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6726 for (i = 0; i < tp->irq_cnt; i++) {
6727 struct tg3_napi *tnapi = &tp->napi[i];
6728 struct tg3_hw_status *sblk;
6730 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6732 &tnapi->status_mapping,
6734 if (!tnapi->hw_status)
6737 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6738 sblk = tnapi->hw_status;
6740 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6743 /* If multivector TSS is enabled, vector 0 does not handle
6744 * tx interrupts. Don't allocate any resources for it.
6746 if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) ||
6747 (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) {
6748 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6751 if (!tnapi->tx_buffers)
6754 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6756 &tnapi->tx_desc_mapping,
6758 if (!tnapi->tx_ring)
6763 * When RSS is enabled, the status block format changes
6764 * slightly. The "rx_jumbo_consumer", "reserved",
6765 * and "rx_mini_consumer" members get mapped to the
6766 * other three rx return ring producer indexes.
6770 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6773 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6776 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6779 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6784 * If multivector RSS is enabled, vector 0 does not handle
6785 * rx or tx interrupts. Don't allocate any resources for it.
6787 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6790 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6791 TG3_RX_RCB_RING_BYTES(tp),
6792 &tnapi->rx_rcb_mapping,
6797 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6803 tg3_free_consistent(tp);
6807 #define MAX_WAIT_CNT 1000
6809 /* To stop a block, clear the enable bit and poll till it
6810 * clears. tp->lock is held.
6812 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6817 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6824 /* We can't enable/disable these bits of the
6825 * 5705/5750, just say success.
6838 for (i = 0; i < MAX_WAIT_CNT; i++) {
6841 if ((val & enable_bit) == 0)
6845 if (i == MAX_WAIT_CNT && !silent) {
6846 dev_err(&tp->pdev->dev,
6847 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6855 /* tp->lock is held. */
6856 static int tg3_abort_hw(struct tg3 *tp, int silent)
6860 tg3_disable_ints(tp);
6862 tp->rx_mode &= ~RX_MODE_ENABLE;
6863 tw32_f(MAC_RX_MODE, tp->rx_mode);
6866 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6867 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6868 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6869 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6870 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6871 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6873 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6874 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6875 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6876 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6877 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6878 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6879 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6881 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6882 tw32_f(MAC_MODE, tp->mac_mode);
6885 tp->tx_mode &= ~TX_MODE_ENABLE;
6886 tw32_f(MAC_TX_MODE, tp->tx_mode);
6888 for (i = 0; i < MAX_WAIT_CNT; i++) {
6890 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6893 if (i >= MAX_WAIT_CNT) {
6894 dev_err(&tp->pdev->dev,
6895 "%s timed out, TX_MODE_ENABLE will not clear "
6896 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6900 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6901 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6902 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6904 tw32(FTQ_RESET, 0xffffffff);
6905 tw32(FTQ_RESET, 0x00000000);
6907 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6908 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6910 for (i = 0; i < tp->irq_cnt; i++) {
6911 struct tg3_napi *tnapi = &tp->napi[i];
6912 if (tnapi->hw_status)
6913 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6916 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6921 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6926 /* NCSI does not support APE events */
6927 if (tp->tg3_flags3 & TG3_FLG3_APE_HAS_NCSI)
6930 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6931 if (apedata != APE_SEG_SIG_MAGIC)
6934 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6935 if (!(apedata & APE_FW_STATUS_READY))
6938 /* Wait for up to 1 millisecond for APE to service previous event. */
6939 for (i = 0; i < 10; i++) {
6940 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6943 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6945 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6946 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6947 event | APE_EVENT_STATUS_EVENT_PENDING);
6949 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6951 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6957 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6958 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6961 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6966 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6969 switch (kind) {
6970 case RESET_KIND_INIT:
6971 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6972 APE_HOST_SEG_SIG_MAGIC);
6973 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6974 APE_HOST_SEG_LEN_MAGIC);
6975 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6976 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6977 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6978 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6979 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6980 APE_HOST_BEHAV_NO_PHYLOCK);
6981 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6982 TG3_APE_HOST_DRVR_STATE_START);
6984 event = APE_EVENT_STATUS_STATE_START;
6986 case RESET_KIND_SHUTDOWN:
6987 /* With the interface we are currently using,
6988 * APE does not track driver state. Wiping
6989 * out the HOST SEGMENT SIGNATURE forces
6990 * the APE to assume OS absent status. */
6992 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6994 if (device_may_wakeup(&tp->pdev->dev) &&
6995 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
6996 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6997 TG3_APE_HOST_WOL_SPEED_AUTO);
6998 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
7000 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
7002 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
7004 event = APE_EVENT_STATUS_STATE_UNLOAD;
7006 case RESET_KIND_SUSPEND:
7007 event = APE_EVENT_STATUS_STATE_SUSPEND;
7013 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
7015 tg3_ape_send_event(tp, event);
7018 /* tp->lock is held. */
7019 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
7021 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
7022 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
7024 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
7025 switch (kind) {
7026 case RESET_KIND_INIT:
7027 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7028 DRV_STATE_START);
7031 case RESET_KIND_SHUTDOWN:
7032 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7033 DRV_STATE_UNLOAD);
7036 case RESET_KIND_SUSPEND:
7037 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7038 DRV_STATE_SUSPEND);
7046 if (kind == RESET_KIND_INIT ||
7047 kind == RESET_KIND_SUSPEND)
7048 tg3_ape_driver_state_change(tp, kind);
7051 /* tp->lock is held. */
7052 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
7054 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
7055 switch (kind) {
7056 case RESET_KIND_INIT:
7057 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7058 DRV_STATE_START_DONE);
7061 case RESET_KIND_SHUTDOWN:
7062 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7063 DRV_STATE_UNLOAD_DONE);
7071 if (kind == RESET_KIND_SHUTDOWN)
7072 tg3_ape_driver_state_change(tp, kind);
7075 /* tp->lock is held. */
7076 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
7078 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7079 switch (kind) {
7080 case RESET_KIND_INIT:
7081 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7082 DRV_STATE_START);
7085 case RESET_KIND_SHUTDOWN:
7086 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7087 DRV_STATE_UNLOAD);
7090 case RESET_KIND_SUSPEND:
7091 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7092 DRV_STATE_SUSPEND);
7101 static int tg3_poll_fw(struct tg3 *tp)
7106 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7107 /* Wait up to 20ms for init done. */
7108 for (i = 0; i < 200; i++) {
7109 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7116 /* Wait for firmware initialization to complete. */
7117 for (i = 0; i < 100000; i++) {
7118 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7119 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7124 /* Chip might not be fitted with firmware. Some Sun onboard
7125 * parts are configured like that. So don't signal the timeout
7126 * of the above loop as an error, but do report the lack of
7127 * running firmware once. */
7129 if (i >= 100000 &&
7130 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
7131 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
7133 netdev_info(tp->dev, "No firmware running\n");
7136 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7137 /* The 57765 A0 needs a little more
7138 * time to do some important work. */
7146 /* Save PCI command register before chip reset */
7147 static void tg3_save_pci_state(struct tg3 *tp)
7149 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7152 /* Restore PCI state after chip reset */
7153 static void tg3_restore_pci_state(struct tg3 *tp)
7157 /* Re-enable indirect register accesses. */
7158 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7159 tp->misc_host_ctrl);
7161 /* Set MAX PCI retry to zero. */
7162 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7163 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7164 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
7165 val |= PCISTATE_RETRY_SAME_DMA;
7166 /* Allow reads and writes to the APE register and memory space. */
7167 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7168 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7169 PCISTATE_ALLOW_APE_SHMEM_WR |
7170 PCISTATE_ALLOW_APE_PSPACE_WR;
7171 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7173 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
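/* Only PCI_COMMAND is saved from and restored to the device itself (see
 * tg3_save_pci_state() above); every other value rewritten here is
 * rebuilt from fields cached in *tp, presumably because the registers
 * cannot be trusted right after the core-clock reset.
 */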
7175 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7176 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7177 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7179 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7180 tp->pci_cacheline_sz);
7181 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7186 /* Make sure PCI-X relaxed ordering bit is clear. */
7187 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7190 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7192 pcix_cmd &= ~PCI_X_CMD_ERO;
7193 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7197 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
7199 /* Chip reset on 5780 will reset the MSI enable bit,
7200 * so we need to restore it. */
7202 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7205 pci_read_config_word(tp->pdev,
7206 tp->msi_cap + PCI_MSI_FLAGS,
7208 pci_write_config_word(tp->pdev,
7209 tp->msi_cap + PCI_MSI_FLAGS,
7210 ctrl | PCI_MSI_FLAGS_ENABLE);
7211 val = tr32(MSGINT_MODE);
7212 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7217 static void tg3_stop_fw(struct tg3 *);
7219 /* tp->lock is held. */
7220 static int tg3_chip_reset(struct tg3 *tp)
7223 void (*write_op)(struct tg3 *, u32, u32);
7228 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7230 /* No matching tg3_nvram_unlock() after this because
7231 * chip reset below will undo the nvram lock. */
7233 tp->nvram_lock_cnt = 0;
7235 /* GRC_MISC_CFG core clock reset will clear the memory
7236 * enable bit in PCI register 4 and the MSI enable bit
7237 * on some chips, so we save relevant registers here. */
7239 tg3_save_pci_state(tp);
7241 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7242 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
7243 tw32(GRC_FASTBOOT_PC, 0);
7246 /* We must avoid the readl() that normally takes place.
7247 * It locks machines, causes machine checks, and other
7248 * fun things. So, temporarily disable the 5701
7249 * hardware workaround, while we do the reset. */
7251 write_op = tp->write32;
7252 if (write_op == tg3_write_flush_reg32)
7253 tp->write32 = tg3_write32;
7255 /* Prevent the irq handler from reading or writing PCI registers
7256 * during chip reset when the memory enable bit in the PCI command
7257 * register may be cleared. The chip does not generate interrupt
7258 * at this time, but the irq handler may still be called due to irq
7259 * sharing or irqpoll. */
7261 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
7262 for (i = 0; i < tp->irq_cnt; i++) {
7263 struct tg3_napi *tnapi = &tp->napi[i];
7264 if (tnapi->hw_status) {
7265 tnapi->hw_status->status = 0;
7266 tnapi->hw_status->status_tag = 0;
7268 tnapi->last_tag = 0;
7269 tnapi->last_irq_tag = 0;
7273 for (i = 0; i < tp->irq_cnt; i++)
7274 synchronize_irq(tp->napi[i].irq_vec);
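/* At this point no interrupt handler can touch the device: the
 * CHIP_RESETTING flag keeps the handlers away from PCI accesses, the
 * status blocks and tags were cleared above, and synchronize_irq()
 * waits out any handler already running on another CPU.
 */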
7276 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7277 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7278 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7282 val = GRC_MISC_CFG_CORECLK_RESET;
7284 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7285 /* Force PCIe 1.0a mode */
7286 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7287 !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
7288 tr32(TG3_PCIE_PHY_TSTCTL) ==
7289 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7290 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7292 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7293 tw32(GRC_MISC_CFG, (1 << 29));
7298 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7299 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7300 tw32(GRC_VCPU_EXT_CTRL,
7301 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7304 /* Manage gphy power for all CPMU absent PCIe devices. */
7305 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7306 !(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
7307 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7309 tw32(GRC_MISC_CFG, val);
7311 /* restore 5701 hardware bug workaround write method */
7312 tp->write32 = write_op;
7314 /* Unfortunately, we have to delay before the PCI read back.
7315 * Some 575X chips even will not respond to a PCI cfg access
7316 * when the reset command is given to the chip.
7318 * How do these hardware designers expect things to work
7319 * properly if the PCI write is posted for a long period
7320 * of time? It is always necessary to have some method by
7321 * which a register read back can occur to push the write
7322 * out which does the reset.
7324 * For most tg3 variants the trick below was working. */
7329 /* Flush PCI posted writes. The normal MMIO registers
7330 * are inaccessible at this time so this is the only
7331 * way to do this reliably (actually, this is no longer
7332 * the case, see above). I tried to use indirect
7333 * register read/write but this upset some 5701 variants. */
7335 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
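/* A configuration read is non-posted, so it cannot complete until the
 * posted GRC_MISC_CFG reset write above has actually reached the chip;
 * the value read back is discarded.
 */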
7339 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
7342 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7346 /* Wait for link training to complete. */
7347 for (i = 0; i < 5000; i++)
7350 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7351 pci_write_config_dword(tp->pdev, 0xc4,
7352 cfg_val | (1 << 15));
7355 /* Clear the "no snoop" and "relaxed ordering" bits. */
7356 pci_read_config_word(tp->pdev,
7357 tp->pcie_cap + PCI_EXP_DEVCTL,
7359 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7360 PCI_EXP_DEVCTL_NOSNOOP_EN);
7362 /* Older PCIe devices only support the 128 byte
7363 * MPS setting. Enforce the restriction. */
7365 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
7366 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7367 pci_write_config_word(tp->pdev,
7368 tp->pcie_cap + PCI_EXP_DEVCTL,
7371 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7373 /* Clear error status */
7374 pci_write_config_word(tp->pdev,
7375 tp->pcie_cap + PCI_EXP_DEVSTA,
7376 PCI_EXP_DEVSTA_CED |
7377 PCI_EXP_DEVSTA_NFED |
7378 PCI_EXP_DEVSTA_FED |
7379 PCI_EXP_DEVSTA_URD);
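/* The PCI_EXP_DEVSTA error bits are write-one-to-clear, so writing the
 * CED/NFED/FED/URD mask back acknowledges any error status the device
 * latched while it was being reset.
 */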
7382 tg3_restore_pci_state(tp);
7384 tp->tg3_flags &= ~(TG3_FLAG_CHIP_RESETTING |
7385 TG3_FLAG_ERROR_PROCESSED);
7387 val = 0;
7388 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
7389 val = tr32(MEMARB_MODE);
7390 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7392 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7394 tw32(0x5000, 0x400);
7397 tw32(GRC_MODE, tp->grc_mode);
7399 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7402 tw32(0xc4, val | (1 << 15));
7405 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7406 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7407 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7408 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7409 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7410 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7413 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7414 tp->mac_mode = MAC_MODE_APE_TX_EN |
7415 MAC_MODE_APE_RX_EN |
7416 MAC_MODE_TDE_ENABLE;
7418 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7419 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7421 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7422 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7427 tw32_f(MAC_MODE, val);
7430 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7432 err = tg3_poll_fw(tp);
7438 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7439 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7440 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7441 !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {
7444 tw32(0x7c00, val | (1 << 25));
7447 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7448 val = tr32(TG3_CPMU_CLCK_ORIDE);
7449 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7452 /* Reprobe ASF enable state. */
7453 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
7454 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
7455 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7456 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7459 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7460 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7461 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7462 tp->last_event_jiffies = jiffies;
7463 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7464 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7471 /* tp->lock is held. */
7472 static void tg3_stop_fw(struct tg3 *tp)
7474 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7475 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7476 /* Wait for RX cpu to ACK the previous event. */
7477 tg3_wait_for_event_ack(tp);
7479 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7481 tg3_generate_fw_event(tp);
7483 /* Wait for RX cpu to ACK this event. */
7484 tg3_wait_for_event_ack(tp);
7488 /* tp->lock is held. */
7489 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7495 tg3_write_sig_pre_reset(tp, kind);
7497 tg3_abort_hw(tp, silent);
7498 err = tg3_chip_reset(tp);
7500 __tg3_set_mac_addr(tp, 0);
7502 tg3_write_sig_legacy(tp, kind);
7503 tg3_write_sig_post_reset(tp, kind);
7511 #define RX_CPU_SCRATCH_BASE 0x30000
7512 #define RX_CPU_SCRATCH_SIZE 0x04000
7513 #define TX_CPU_SCRATCH_BASE 0x34000
7514 #define TX_CPU_SCRATCH_SIZE 0x04000
7516 /* tp->lock is held. */
7517 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7521 BUG_ON(offset == TX_CPU_BASE &&
7522 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
7524 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7525 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7527 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7530 if (offset == RX_CPU_BASE) {
7531 for (i = 0; i < 10000; i++) {
7532 tw32(offset + CPU_STATE, 0xffffffff);
7533 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7534 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7538 tw32(offset + CPU_STATE, 0xffffffff);
7539 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7542 for (i = 0; i < 10000; i++) {
7543 tw32(offset + CPU_STATE, 0xffffffff);
7544 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7545 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7551 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7552 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7556 /* Clear firmware's nvram arbitration. */
7557 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7558 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
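/* Halting the CPU may have interrupted a bootcode NVRAM access, so the
 * firmware's outstanding arbitration request is dropped above,
 * presumably so it cannot block later host NVRAM accesses.
 */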
7562 struct fw_info {
7563 unsigned int fw_base;
7564 unsigned int fw_len;
7565 const __be32 *fw_data;
7566 };
7568 /* tp->lock is held. */
7569 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7570 int cpu_scratch_size, struct fw_info *info)
7572 int err, lock_err, i;
7573 void (*write_op)(struct tg3 *, u32, u32);
7575 if (cpu_base == TX_CPU_BASE &&
7576 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7578 "%s: Trying to load TX cpu firmware which is 5705\n",
7583 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7584 write_op = tg3_write_mem;
7586 write_op = tg3_write_indirect_reg32;
7588 /* It is possible that bootcode is still loading at this point.
7589 * Get the nvram lock first before halting the cpu.
7591 lock_err = tg3_nvram_lock(tp);
7592 err = tg3_halt_cpu(tp, cpu_base);
7594 tg3_nvram_unlock(tp);
7598 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7599 write_op(tp, cpu_scratch_base + i, 0);
7600 tw32(cpu_base + CPU_STATE, 0xffffffff);
7601 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7602 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7603 write_op(tp, (cpu_scratch_base +
7604 (info->fw_base & 0xffff) +
7606 be32_to_cpu(info->fw_data[i]));
7614 /* tp->lock is held. */
7615 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7617 struct fw_info info;
7618 const __be32 *fw_data;
7621 fw_data = (void *)tp->fw->data;
7623 /* Firmware blob starts with version numbers, followed by
7624 start address and length. We are setting complete length.
7625 length = end_address_of_bss - start_address_of_text.
7626 Remainder is the blob to be loaded contiguously
7627 from start address. */
7629 info.fw_base = be32_to_cpu(fw_data[1]);
7630 info.fw_len = tp->fw->size - 12;
7631 info.fw_data = &fw_data[3];
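/* Layout sketch (for illustration only; inferred from the three
 * assignments above and the comment describing the blob):
 *
 *	fw_data[0]    version number
 *	fw_data[1]    start address in NIC memory  -> info.fw_base
 *	fw_data[2]    length word
 *	fw_data[3..]  image proper                 -> info.fw_data
 *
 * All words are big-endian, hence the be32_to_cpu() conversions. The
 * code sizes the image from the file (tp->fw->size minus the 12-byte
 * header) rather than reading the length word.
 */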
7633 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7634 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7639 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7640 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7645 /* Now startup only the RX cpu. */
7646 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7647 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7649 for (i = 0; i < 5; i++) {
7650 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7652 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7653 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7654 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7658 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7659 "should be %08x\n", __func__,
7660 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7663 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7664 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7669 /* 5705 needs a special version of the TSO firmware. */
7671 /* tp->lock is held. */
7672 static int tg3_load_tso_firmware(struct tg3 *tp)
7674 struct fw_info info;
7675 const __be32 *fw_data;
7676 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7679 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7682 fw_data = (void *)tp->fw->data;
7684 /* Firmware blob starts with version numbers, followed by
7685 start address and length. We are setting complete length.
7686 length = end_address_of_bss - start_address_of_text.
7687 Remainder is the blob to be loaded contiguously
7688 from start address. */
7690 info.fw_base = be32_to_cpu(fw_data[1]);
7691 cpu_scratch_size = tp->fw_len;
7692 info.fw_len = tp->fw->size - 12;
7693 info.fw_data = &fw_data[3];
7695 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7696 cpu_base = RX_CPU_BASE;
7697 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7699 cpu_base = TX_CPU_BASE;
7700 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7701 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7704 err = tg3_load_firmware_cpu(tp, cpu_base,
7705 cpu_scratch_base, cpu_scratch_size,
7710 /* Now startup the cpu. */
7711 tw32(cpu_base + CPU_STATE, 0xffffffff);
7712 tw32_f(cpu_base + CPU_PC, info.fw_base);
7714 for (i = 0; i < 5; i++) {
7715 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7717 tw32(cpu_base + CPU_STATE, 0xffffffff);
7718 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7719 tw32_f(cpu_base + CPU_PC, info.fw_base);
7724 "%s fails to set CPU PC, is %08x should be %08x\n",
7725 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7728 tw32(cpu_base + CPU_STATE, 0xffffffff);
7729 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7734 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7736 struct tg3 *tp = netdev_priv(dev);
7737 struct sockaddr *addr = p;
7738 int err = 0, skip_mac_1 = 0;
7740 if (!is_valid_ether_addr(addr->sa_data))
7743 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7745 if (!netif_running(dev))
7748 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7749 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7751 addr0_high = tr32(MAC_ADDR_0_HIGH);
7752 addr0_low = tr32(MAC_ADDR_0_LOW);
7753 addr1_high = tr32(MAC_ADDR_1_HIGH);
7754 addr1_low = tr32(MAC_ADDR_1_LOW);
7756 /* Skip MAC addr 1 if ASF is using it. */
7757 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7758 !(addr1_high == 0 && addr1_low == 0))
7759 skip_mac_1 = 1;
7761 spin_lock_bh(&tp->lock);
7762 __tg3_set_mac_addr(tp, skip_mac_1);
7763 spin_unlock_bh(&tp->lock);
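/* tg3_set_bdinfo() below fills one TG3_BDINFO control block in NIC
 * SRAM: the 64-bit host DMA address of the ring (high/low words), the
 * maxlen/flags word, and, on pre-5705 chips only, the ring's address in
 * NIC-local memory.
 */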
7768 /* tp->lock is held. */
7769 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7770 dma_addr_t mapping, u32 maxlen_flags,
7774 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7775 ((u64) mapping >> 32));
7777 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7778 ((u64) mapping & 0xffffffff));
7780 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7783 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7785 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7789 static void __tg3_set_rx_mode(struct net_device *);
7790 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7794 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) {
7795 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7796 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7797 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7799 tw32(HOSTCC_TXCOL_TICKS, 0);
7800 tw32(HOSTCC_TXMAX_FRAMES, 0);
7801 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7804 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
7805 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7806 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7807 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7809 tw32(HOSTCC_RXCOL_TICKS, 0);
7810 tw32(HOSTCC_RXMAX_FRAMES, 0);
7811 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7814 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7815 u32 val = ec->stats_block_coalesce_usecs;
7817 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7818 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7820 if (!netif_carrier_ok(tp->dev))
7823 tw32(HOSTCC_STAT_COAL_TICKS, val);
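/* Each additional interrupt vector has its own bank of coalescing
 * registers, spaced 0x18 bytes apart starting at the *_VEC1 offsets;
 * the loops below program the banks for the active vectors and zero
 * the remaining banks up to irq_max.
 */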
7826 for (i = 0; i < tp->irq_cnt - 1; i++) {
7829 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7830 tw32(reg, ec->rx_coalesce_usecs);
7831 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7832 tw32(reg, ec->rx_max_coalesced_frames);
7833 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7834 tw32(reg, ec->rx_max_coalesced_frames_irq);
7836 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7837 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7838 tw32(reg, ec->tx_coalesce_usecs);
7839 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7840 tw32(reg, ec->tx_max_coalesced_frames);
7841 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7842 tw32(reg, ec->tx_max_coalesced_frames_irq);
7846 for (; i < tp->irq_max - 1; i++) {
7847 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7848 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7849 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7851 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7852 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7853 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7854 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7859 /* tp->lock is held. */
7860 static void tg3_rings_reset(struct tg3 *tp)
7863 u32 stblk, txrcb, rxrcb, limit;
7864 struct tg3_napi *tnapi = &tp->napi[0];
7866 /* Disable all transmit rings but the first. */
7867 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7868 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7869 else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
7870 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7871 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7872 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7874 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7876 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7877 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7878 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7879 BDINFO_FLAGS_DISABLED);
7882 /* Disable all receive return rings but the first. */
7883 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
7884 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7885 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7886 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7887 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7888 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7889 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7891 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7893 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7894 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7895 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7896 BDINFO_FLAGS_DISABLED);
7898 /* Disable interrupts */
7899 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7901 /* Zero mailbox registers. */
7902 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7903 for (i = 1; i < tp->irq_max; i++) {
7904 tp->napi[i].tx_prod = 0;
7905 tp->napi[i].tx_cons = 0;
7906 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
7907 tw32_mailbox(tp->napi[i].prodmbox, 0);
7908 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7909 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7911 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
7912 tw32_mailbox(tp->napi[0].prodmbox, 0);
7914 tp->napi[0].tx_prod = 0;
7915 tp->napi[0].tx_cons = 0;
7916 tw32_mailbox(tp->napi[0].prodmbox, 0);
7917 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7920 /* Make sure the NIC-based send BD rings are disabled. */
7921 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7922 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7923 for (i = 0; i < 16; i++)
7924 tw32_tx_mbox(mbox + i * 8, 0);
7927 txrcb = NIC_SRAM_SEND_RCB;
7928 rxrcb = NIC_SRAM_RCV_RET_RCB;
7930 /* Clear status block in ram. */
7931 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7933 /* Set status block DMA address */
7934 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7935 ((u64) tnapi->status_mapping >> 32));
7936 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7937 ((u64) tnapi->status_mapping & 0xffffffff));
7939 if (tnapi->tx_ring) {
7940 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7941 (TG3_TX_RING_SIZE <<
7942 BDINFO_FLAGS_MAXLEN_SHIFT),
7943 NIC_SRAM_TX_BUFFER_DESC);
7944 txrcb += TG3_BDINFO_SIZE;
7947 if (tnapi->rx_rcb) {
7948 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7949 (tp->rx_ret_ring_mask + 1) <<
7950 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7951 rxrcb += TG3_BDINFO_SIZE;
7954 stblk = HOSTCC_STATBLCK_RING1;
7956 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7957 u64 mapping = (u64)tnapi->status_mapping;
7958 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7959 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7961 /* Clear status block in ram. */
7962 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7964 if (tnapi->tx_ring) {
7965 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7966 (TG3_TX_RING_SIZE <<
7967 BDINFO_FLAGS_MAXLEN_SHIFT),
7968 NIC_SRAM_TX_BUFFER_DESC);
7969 txrcb += TG3_BDINFO_SIZE;
7972 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7973 ((tp->rx_ret_ring_mask + 1) <<
7974 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7977 rxrcb += TG3_BDINFO_SIZE;
7981 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7983 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7985 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS) ||
7986 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
7987 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7988 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7989 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7990 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7991 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7992 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7994 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7996 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7997 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7999 val = min(nic_rep_thresh, host_rep_thresh);
8000 tw32(RCVBDI_STD_THRESH, val);
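/* The standard-ring replenish threshold just programmed is the smaller
 * of half the on-chip BD cache (further capped by rx_std_max_post) and
 * one eighth of the configured ring depth, but at least one descriptor;
 * e.g. with the default rx_pending of 200, host_rep_thresh is 25.
 */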
8002 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS)
8003 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8005 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ||
8006 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
8009 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8010 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8012 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8014 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8016 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8017 tw32(RCVBDI_JUMBO_THRESH, val);
8019 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS)
8020 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8023 /* tp->lock is held. */
8024 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8026 u32 val, rdmac_mode;
8028 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8030 tg3_disable_ints(tp);
8034 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8036 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
8037 tg3_abort_hw(tp, 1);
8039 /* Enable MAC control of LPI */
8040 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8041 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8042 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8043 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8045 tw32_f(TG3_CPMU_EEE_CTRL,
8046 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8048 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8049 TG3_CPMU_EEEMD_LPI_IN_TX |
8050 TG3_CPMU_EEEMD_LPI_IN_RX |
8051 TG3_CPMU_EEEMD_EEE_ENABLE;
8053 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8054 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8056 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8057 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8059 tw32_f(TG3_CPMU_EEE_MODE, val);
8061 tw32_f(TG3_CPMU_EEE_DBTMR1,
8062 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8063 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8065 tw32_f(TG3_CPMU_EEE_DBTMR2,
8066 TG3_CPMU_DBTMR2_APE_TX_2047US |
8067 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8073 err = tg3_chip_reset(tp);
8077 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8079 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8080 val = tr32(TG3_CPMU_CTRL);
8081 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8082 tw32(TG3_CPMU_CTRL, val);
8084 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8085 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8086 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8087 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8089 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8090 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8091 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8092 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8094 val = tr32(TG3_CPMU_HST_ACC);
8095 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8096 val |= CPMU_HST_ACC_MACCLK_6_25;
8097 tw32(TG3_CPMU_HST_ACC, val);
8100 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8101 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8102 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8103 PCIE_PWR_MGMT_L1_THRESH_4MS;
8104 tw32(PCIE_PWR_MGMT_THRESH, val);
8106 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8107 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8109 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8111 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8112 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8115 if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
8116 u32 grc_mode = tr32(GRC_MODE);
8118 /* Access the lower 1K of PL PCIE block registers. */
8119 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8120 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8122 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8123 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8124 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8126 tw32(GRC_MODE, grc_mode);
8129 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8130 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8131 u32 grc_mode = tr32(GRC_MODE);
8133 /* Access the lower 1K of PL PCIE block registers. */
8134 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8135 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8137 val = tr32(TG3_PCIE_TLDLPL_PORT +
8138 TG3_PCIE_PL_LO_PHYCTL5);
8139 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8140 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8142 tw32(GRC_MODE, grc_mode);
8145 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8146 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8147 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8148 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8151 /* This works around an issue with Athlon chipsets on
8152 * B3 tigon3 silicon. This bit has no effect on any
8153 * other revision. But do not set this on PCI Express
8154 * chips and don't even touch the clocks if the CPMU is present.
8156 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
8157 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8158 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8159 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8162 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8163 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
8164 val = tr32(TG3PCI_PCISTATE);
8165 val |= PCISTATE_RETRY_SAME_DMA;
8166 tw32(TG3PCI_PCISTATE, val);
8169 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
8170 /* Allow reads and writes to the
8171 * APE register and memory space.
8173 val = tr32(TG3PCI_PCISTATE);
8174 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8175 PCISTATE_ALLOW_APE_SHMEM_WR |
8176 PCISTATE_ALLOW_APE_PSPACE_WR;
8177 tw32(TG3PCI_PCISTATE, val);
8180 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8181 /* Enable some hw fixes. */
8182 val = tr32(TG3PCI_MSI_DATA);
8183 val |= (1 << 26) | (1 << 28) | (1 << 29);
8184 tw32(TG3PCI_MSI_DATA, val);
8187 /* Descriptor ring init may make accesses to the
8188 * NIC SRAM area to setup the TX descriptors, so we
8189 * can only do this after the hardware has been
8190 * successfully reset.
8192 err = tg3_init_rings(tp);
8196 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
8197 val = tr32(TG3PCI_DMA_RW_CTRL) &
8198 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8199 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8200 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8201 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8202 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8203 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8204 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8205 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8206 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8207 /* This value is determined during the probe time DMA
8208 * engine test, tg3_test_dma.
8210 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8213 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8214 GRC_MODE_4X_NIC_SEND_RINGS |
8215 GRC_MODE_NO_TX_PHDR_CSUM |
8216 GRC_MODE_NO_RX_PHDR_CSUM);
8217 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8219 /* Pseudo-header checksum is done by hardware logic and not
8220 * the offload processors, so make the chip do the pseudo-
8221 * header checksums on receive. For transmit it is more
8222 * convenient to do the pseudo-header checksum in software
8223 * as Linux does that on transmit for us in all cases. */
8225 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8228 tw32(GRC_MODE, tp->grc_mode |
8229 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8231 /* Set up the timer prescaler register. The clock is always 66 MHz. */
8232 val = tr32(GRC_MISC_CFG);
8234 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8235 tw32(GRC_MISC_CFG, val);
8237 /* Initialize MBUF/DESC pool. */
8238 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
8239 /* Do nothing. */
8240 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8241 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8242 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8243 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8245 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8246 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8247 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8248 } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
8251 fw_len = tp->fw_len;
8252 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8253 tw32(BUFMGR_MB_POOL_ADDR,
8254 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8255 tw32(BUFMGR_MB_POOL_SIZE,
8256 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
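/* The 5705 TSO firmware lives at the bottom of the MBUF pool (see
 * tg3_load_tso_firmware() above), so the pool base is moved up by the
 * firmware length rounded to a 128-byte boundary, and the pool shrinks
 * by that amount plus 0xa00 bytes.
 */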
8259 if (tp->dev->mtu <= ETH_DATA_LEN) {
8260 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8261 tp->bufmgr_config.mbuf_read_dma_low_water);
8262 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8263 tp->bufmgr_config.mbuf_mac_rx_low_water);
8264 tw32(BUFMGR_MB_HIGH_WATER,
8265 tp->bufmgr_config.mbuf_high_water);
8267 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8268 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8269 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8270 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8271 tw32(BUFMGR_MB_HIGH_WATER,
8272 tp->bufmgr_config.mbuf_high_water_jumbo);
8274 tw32(BUFMGR_DMA_LOW_WATER,
8275 tp->bufmgr_config.dma_low_water);
8276 tw32(BUFMGR_DMA_HIGH_WATER,
8277 tp->bufmgr_config.dma_high_water);
8279 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8280 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8281 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8282 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8283 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8284 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8285 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8286 tw32(BUFMGR_MODE, val);
8287 for (i = 0; i < 2000; i++) {
8288 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8293 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8297 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8298 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8300 tg3_setup_rxbd_thresholds(tp);
8302 /* Initialize TG3_BDINFO's at:
8303 * RCVDBDI_STD_BD: standard eth size rx ring
8304 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8305 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8308 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8309 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8310 * ring attribute flags
8311 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8313 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8314 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8316 * The size of each ring is fixed in the firmware, but the location is
8317 * configurable. */
8319 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8320 ((u64) tpr->rx_std_mapping >> 32));
8321 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8322 ((u64) tpr->rx_std_mapping & 0xffffffff));
8323 if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
8324 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8325 NIC_SRAM_RX_BUFFER_DESC);
8327 /* Disable the mini ring */
8328 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8329 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8330 BDINFO_FLAGS_DISABLED);
8332 /* Program the jumbo buffer descriptor ring control
8333 * blocks on those devices that have them.
8335 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8336 ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
8337 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))) {
8339 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
8340 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8341 ((u64) tpr->rx_jmb_mapping >> 32));
8342 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8343 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8344 val = TG3_RX_JMB_RING_SIZE(tp) <<
8345 BDINFO_FLAGS_MAXLEN_SHIFT;
8346 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8347 val | BDINFO_FLAGS_USE_EXT_RECV);
8348 if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) ||
8349 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8350 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8351 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8353 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8354 BDINFO_FLAGS_DISABLED);
8357 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
8358 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8359 val = TG3_RX_STD_MAX_SIZE_5700;
8361 val = TG3_RX_STD_MAX_SIZE_5717;
8362 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8363 val |= (TG3_RX_STD_DMA_SZ << 2);
8365 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8367 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8369 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8371 tpr->rx_std_prod_idx = tp->rx_pending;
8372 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8374 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
8375 tp->rx_jumbo_pending : 0;
8376 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8378 tg3_rings_reset(tp);
8380 /* Initialize MAC address and backoff seed. */
8381 __tg3_set_mac_addr(tp, 0);
8383 /* MTU + ethernet header + FCS + optional VLAN tag */
8384 tw32(MAC_RX_MTU_SIZE,
8385 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8387 /* The slot time is changed by tg3_setup_phy if we
8388 * run at gigabit with half duplex. */
8390 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8391 (6 << TX_LENGTHS_IPG_SHIFT) |
8392 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8394 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8395 val |= tr32(MAC_TX_LENGTHS) &
8396 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8397 TX_LENGTHS_CNT_DWN_VAL_MSK);
8399 tw32(MAC_TX_LENGTHS, val);
8401 /* Receive rules. */
8402 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8403 tw32(RCVLPC_CONFIG, 0x0181);
8405 /* Calculate RDMAC_MODE setting early; we need it to determine
8406 * the RCVLPC_STATE_ENABLE mask. */
8408 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8409 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8410 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8411 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8412 RDMAC_MODE_LNGREAD_ENAB);
8414 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8415 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8417 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8418 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8419 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8420 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8421 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8422 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8424 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8425 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8426 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
8427 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8428 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8429 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8430 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
8431 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8435 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
8436 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8438 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8439 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8441 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
8442 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8443 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8444 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8446 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8447 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8449 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8450 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8451 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8452 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8453 (tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {
8454 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8455 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8456 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8457 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8458 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8459 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8460 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8461 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8462 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8464 tw32(TG3_RDMA_RSRVCTRL_REG,
8465 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8468 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8469 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8470 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8471 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8472 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8473 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8476 /* Receive/send statistics. */
8477 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
8478 val = tr32(RCVLPC_STATS_ENABLE);
8479 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8480 tw32(RCVLPC_STATS_ENABLE, val);
8481 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8482 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8483 val = tr32(RCVLPC_STATS_ENABLE);
8484 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8485 tw32(RCVLPC_STATS_ENABLE, val);
8487 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8489 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8490 tw32(SNDDATAI_STATSENAB, 0xffffff);
8491 tw32(SNDDATAI_STATSCTRL,
8492 (SNDDATAI_SCTRL_ENABLE |
8493 SNDDATAI_SCTRL_FASTUPD));
8495 /* Setup host coalescing engine. */
8496 tw32(HOSTCC_MODE, 0);
8497 for (i = 0; i < 2000; i++) {
8498 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8503 __tg3_set_coalesce(tp, &tp->coal);
8505 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8506 /* Status/statistics block address. See tg3_timer,
8507 * the tg3_periodic_fetch_stats call there, and
8508 * tg3_get_stats to see how this works for 5705/5750 chips. */
8510 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8511 ((u64) tp->stats_mapping >> 32));
8512 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8513 ((u64) tp->stats_mapping & 0xffffffff));
8514 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8516 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8518 /* Clear statistics and status block memory areas */
8519 for (i = NIC_SRAM_STATS_BLK;
8520 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8522 tg3_write_mem(tp, i, 0);
8527 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8529 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8530 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8531 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8532 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8534 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8535 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8536 /* reset to prevent losing 1st rx packet intermittently */
8537 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8541 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8542 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8545 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8546 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8547 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8548 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8549 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8550 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8551 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8554 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8555 * If TG3_FLG2_IS_NIC is zero, we should read the
8556 * register to preserve the GPIO settings for LOMs. The GPIOs,
8557 * whether used as inputs or outputs, are set by boot code after
8558 * reset. */
8560 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
8563 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8564 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8565 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8567 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8568 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8569 GRC_LCLCTRL_GPIO_OUTPUT3;
8571 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8572 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8574 tp->grc_local_ctrl &= ~gpio_mask;
8575 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8577 /* GPIO1 must be driven high for eeprom write protect */
8578 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
8579 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8580 GRC_LCLCTRL_GPIO_OUTPUT1);
8582 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8585 if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
8586 tp->irq_cnt > 1) {
8587 val = tr32(MSGINT_MODE);
8588 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8589 tw32(MSGINT_MODE, val);
8592 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8593 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8597 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8598 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8599 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8600 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8601 WDMAC_MODE_LNGREAD_ENAB);
8603 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8604 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8605 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8606 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8607 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8609 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8610 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
8611 val |= WDMAC_MODE_RX_ACCEL;
8615 /* Enable host coalescing bug fix */
8616 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8617 val |= WDMAC_MODE_STATUS_TAG_FIX;
8619 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8620 val |= WDMAC_MODE_BURST_ALL_DATA;
8622 tw32_f(WDMAC_MODE, val);
8625 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
8628 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8630 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8631 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8632 pcix_cmd |= PCI_X_CMD_READ_2K;
8633 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8634 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8635 pcix_cmd |= PCI_X_CMD_READ_2K;
8637 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8641 tw32_f(RDMAC_MODE, rdmac_mode);
8644 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8645 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8646 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8648 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8649 tw32(SNDDATAC_MODE,
8650 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8651 else
8652 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8654 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8655 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8656 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8657 if (tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP)
8658 val |= RCVDBDI_MODE_LRG_RING_SZ;
8659 tw32(RCVDBDI_MODE, val);
8660 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8661 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8662 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8663 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8664 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
8665 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8666 tw32(SNDBDI_MODE, val);
8667 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8669 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8670 err = tg3_load_5701_a0_firmware_fix(tp);
8675 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
8676 err = tg3_load_tso_firmware(tp);
8681 tp->tx_mode = TX_MODE_ENABLE;
8683 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
8684 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8685 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8687 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8688 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8689 tp->tx_mode &= ~val;
8690 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8693 tw32_f(MAC_TX_MODE, tp->tx_mode);
8696 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
8697 u32 reg = MAC_RSS_INDIR_TBL_0;
8698 u8 *ent = (u8 *)&val;
8700 /* Setup the indirection table */
8701 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8702 int idx = i % sizeof(val);
8704 ent[idx] = i % (tp->irq_cnt - 1);
8705 if (idx == sizeof(val) - 1) {
8706 tw32(reg, val);
8707 reg += 4;
8708 }
8709 }
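/* Each 32-bit write above carries four one-byte table entries (ent[]
 * aliases val), and the entries rotate over the irq_cnt - 1 rx-capable
 * vectors.
 */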
8711 /* Setup the "secret" hash key. */
8712 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8713 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8714 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8715 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8716 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8717 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8718 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8719 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8720 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8721 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8724 tp->rx_mode = RX_MODE_ENABLE;
8725 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8726 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8728 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
8729 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8730 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8731 RX_MODE_RSS_IPV6_HASH_EN |
8732 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8733 RX_MODE_RSS_IPV4_HASH_EN |
8734 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8736 tw32_f(MAC_RX_MODE, tp->rx_mode);
8739 tw32(MAC_LED_CTRL, tp->led_ctrl);
8741 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8742 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8743 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8746 tw32_f(MAC_RX_MODE, tp->rx_mode);
8749 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8750 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8751 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8752 /* Set drive transmission level to 1.2V */
8753 /* only if the signal pre-emphasis bit is not set */
8754 val = tr32(MAC_SERDES_CFG);
8757 tw32(MAC_SERDES_CFG, val);
8759 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8760 tw32(MAC_SERDES_CFG, 0x616000);
8763 /* Prevent chip from dropping frames when flow control
8764 * is enabled. */
8766 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8767 val = 1;
8768 else
8769 val = 2;
8770 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8772 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8773 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8774 /* Use hardware link auto-negotiation */
8775 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
8778 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8779 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8782 tmp = tr32(SERDES_RX_CTRL);
8783 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8784 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8785 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8786 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8789 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
8790 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8791 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8792 tp->link_config.speed = tp->link_config.orig_speed;
8793 tp->link_config.duplex = tp->link_config.orig_duplex;
8794 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8797 err = tg3_setup_phy(tp, 0);
8801 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8802 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8805 /* Clear CRC stats. */
8806 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8807 tg3_writephy(tp, MII_TG3_TEST1,
8808 tmp | MII_TG3_TEST1_CRC_EN);
8809 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8814 __tg3_set_rx_mode(tp->dev);
8816 /* Initialize receive rules. */
8817 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8818 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8819 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8820 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8822 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8823 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
8824 limit = 8;
8825 else
8826 limit = 16;
8827 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
8828 limit -= 4;
8829 switch (limit) {
8830 case 16:
8831 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8832 case 15:
8833 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8834 case 14:
8835 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8836 case 13:
8837 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8838 case 12:
8839 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8840 case 11:
8841 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8842 case 10:
8843 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8844 case 9:
8845 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8846 case 8:
8847 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8848 case 7:
8849 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8850 case 6:
8851 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8852 case 5:
8853 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8854 case 4:
8855 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8856 case 3:
8857 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8858 case 2:
8859 case 1:
8860 default:
8861 break;
8862 }
8865 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8866 /* Write our heartbeat update interval to APE. */
8867 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8868 APE_HOST_HEARTBEAT_INT_DISABLE);
8870 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8875 /* Called at device open time to get the chip ready for
8876 * packet processing. Invoked with tp->lock held.
8878 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8880 tg3_switch_clocks(tp);
8882 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8884 return tg3_reset_hw(tp, reset_phy);
8887 #define TG3_STAT_ADD32(PSTAT, REG) \
8888 do { u32 __val = tr32(REG); \
8889 (PSTAT)->low += __val; \
8890 if ((PSTAT)->low < __val) \
8891 (PSTAT)->high += 1; \
8892 } while (0)
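/* The hardware counters are only 32 bits wide; the macro widens them
 * into the 64-bit low/high software counters. After the addition,
 * low < __val exactly when the 32-bit add wrapped, so the high word is
 * carried then. Example: low = 0xffffff00 plus __val = 0x200 yields
 * low = 0x100, which is < 0x200, so high is incremented.
 */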
8894 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8896 struct tg3_hw_stats *sp = tp->hw_stats;
8898 if (!netif_carrier_ok(tp->dev))
8901 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8902 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8903 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8904 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8905 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8906 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8907 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8908 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8909 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8910 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8911 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8912 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8913 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8915 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8916 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8917 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8918 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8919 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8920 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8921 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8922 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8923 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8924 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8925 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8926 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8927 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8928 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8930 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8931 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
8932 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8933 } else {
8934 u32 val = tr32(HOSTCC_FLOW_ATTN);
8935 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8936 if (val) {
8937 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8938 sp->rx_discards.low += val;
8939 if (sp->rx_discards.low < val)
8940 sp->rx_discards.high += 1;
8942 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8944 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8947 static void tg3_timer(unsigned long __opaque)
8949 struct tg3 *tp = (struct tg3 *) __opaque;
8954 spin_lock(&tp->lock);
8956 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8957 /* All of this garbage is because, when using non-tagged
8958 * IRQ status, the mailbox/status_block protocol the chip
8959 * uses with the CPU is race prone.
8961 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8962 tw32(GRC_LOCAL_CTRL,
8963 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8965 tw32(HOSTCC_MODE, tp->coalesce_mode |
8966 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8969 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8970 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
8971 spin_unlock(&tp->lock);
8972 schedule_work(&tp->reset_task);
8977 /* This part only runs once per second. */
8978 if (!--tp->timer_counter) {
8979 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8980 tg3_periodic_fetch_stats(tp);
8982 if (tp->setlpicnt && !--tp->setlpicnt) {
8983 u32 val = tr32(TG3_CPMU_EEE_MODE);
8984 tw32(TG3_CPMU_EEE_MODE,
8985 val | TG3_CPMU_EEEMD_LPI_ENABLE);
8988 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
8992 mac_stat = tr32(MAC_STATUS);
8995 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8996 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8998 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9002 tg3_setup_phy(tp, 0);
9003 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
9004 u32 mac_stat = tr32(MAC_STATUS);
9007 if (netif_carrier_ok(tp->dev) &&
9008 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9011 if (!netif_carrier_ok(tp->dev) &&
9012 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9013 MAC_STATUS_SIGNAL_DET))) {
9017 if (!tp->serdes_counter) {
9020 ~MAC_MODE_PORT_MODE_MASK));
9022 tw32_f(MAC_MODE, tp->mac_mode);
9025 tg3_setup_phy(tp, 0);
9027 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9028 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9029 tg3_serdes_parallel_detect(tp);
9032 tp->timer_counter = tp->timer_multiplier;
9035 /* Heartbeat is only sent once every 2 seconds.
9037 * The heartbeat is to tell the ASF firmware that the host
9038 * driver is still alive. In the event that the OS crashes,
9039 * ASF needs to reset the hardware to free up the FIFO space
9040 * that may be filled with rx packets destined for the host.
9041 * If the FIFO is full, ASF will no longer function properly.
9043 * Unintended resets have been reported on real time kernels
9044 * where the timer doesn't run on time. Netpoll will also have
9045 * the same problem.
9047 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9048 * to check the ring condition when the heartbeat is expiring
9049 * before doing the reset. This will prevent most unintended
9050 * resets.
9052 if (!--tp->asf_counter) {
9053 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
9054 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
9055 tg3_wait_for_event_ack(tp);
9057 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9058 FWCMD_NICDRV_ALIVE3);
9059 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9060 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9061 TG3_FW_UPDATE_TIMEOUT_SEC);
9063 tg3_generate_fw_event(tp);
9065 tp->asf_counter = tp->asf_multiplier;
9068 spin_unlock(&tp->lock);
9071 tp->timer.expires = jiffies + tp->timer_offset;
9072 add_timer(&tp->timer);
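/* Note that the timer re-arms itself each tick; the expires/add_timer
 * pair above is equivalent to mod_timer(&tp->timer,
 * jiffies + tp->timer_offset) on a timer that is not pending.
 */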
9075 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9078 unsigned long flags;
9080 struct tg3_napi *tnapi = &tp->napi[irq_num];
9082 if (tp->irq_cnt == 1)
9083 name = tp->dev->name;
9084 else {
9085 name = &tnapi->irq_lbl[0];
9086 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9087 name[IFNAMSIZ-1] = 0;
9088 }
9090 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
9091 fn = tg3_msi;
9092 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
9093 fn = tg3_msi_1shot;
9094 flags = 0;
9095 } else {
9096 fn = tg3_interrupt;
9097 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
9098 fn = tg3_interrupt_tagged;
9099 flags = IRQF_SHARED;
9102 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9105 static int tg3_test_interrupt(struct tg3 *tp)
9107 struct tg3_napi *tnapi = &tp->napi[0];
9108 struct net_device *dev = tp->dev;
9109 int err, i, intr_ok = 0;
9112 if (!netif_running(dev))
9115 tg3_disable_ints(tp);
9117 free_irq(tnapi->irq_vec, tnapi);
9120 * Turn off MSI one shot mode. Otherwise this test has no
9121 * observable way to know whether the interrupt was delivered.
9123 if ((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
9124 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
9125 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9126 tw32(MSGINT_MODE, val);
9129 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9130 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9134 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9135 tg3_enable_ints(tp);
9137 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9140 for (i = 0; i < 5; i++) {
9141 u32 int_mbox, misc_host_ctrl;
9143 int_mbox = tr32_mailbox(tnapi->int_mbox);
9144 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9146 if ((int_mbox != 0) ||
9147 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9155 tg3_disable_ints(tp);
9157 free_irq(tnapi->irq_vec, tnapi);
9159 err = tg3_request_irq(tp, 0);
9165 /* Reenable MSI one shot mode. */
9166 if ((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
9167 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
9168 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9169 tw32(MSGINT_MODE, val);
9177 /* Returns 0 if the MSI test succeeds, or if the test fails but INTx
9178 * mode is successfully restored.
9180 static int tg3_test_msi(struct tg3 *tp)
9185 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
9188 /* Turn off SERR reporting in case MSI terminates with Master
9189 * Abort.
9190 */
9191 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9192 pci_write_config_word(tp->pdev, PCI_COMMAND,
9193 pci_cmd & ~PCI_COMMAND_SERR);
9195 err = tg3_test_interrupt(tp);
9197 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9202 /* other failures */
9206 /* MSI test failed, go back to INTx mode */
9207 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9208 "to INTx mode. Please report this failure to the PCI "
9209 "maintainer and include system chipset information\n");
9211 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9213 pci_disable_msi(tp->pdev);
9215 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
9216 tp->napi[0].irq_vec = tp->pdev->irq;
9218 err = tg3_request_irq(tp, 0);
9222 /* Need to reset the chip because the MSI cycle may have terminated
9223 * with Master Abort.
9225 tg3_full_lock(tp, 1);
9227 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9228 err = tg3_init_hw(tp, 1);
9230 tg3_full_unlock(tp);
9233 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9238 static int tg3_request_firmware(struct tg3 *tp)
9240 const __be32 *fw_data;
9242 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9243 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9248 fw_data = (void *)tp->fw->data;
9250 /* Firmware blob starts with version numbers, followed by
9251 * start address and _full_ length including BSS sections
9252 * (which must be longer than the actual data, of course).
9255 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9256 if (tp->fw_len < (tp->fw->size - 12)) {
9257 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9258 tp->fw_len, tp->fw_needed);
9259 release_firmware(tp->fw);
9264 /* We no longer need firmware; we have it. */
9265 tp->fw_needed = NULL;
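/* Sketch of the blob layout assumed above (three big-endian words of
 * header, then the image proper):
 *
 *	fw_data[0]	firmware version
 *	fw_data[1]	start (load) address
 *	fw_data[2]	full length, including BSS
 *	fw_data[3]...	text/data payload (tp->fw->size - 12 bytes)
 */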
9269 static bool tg3_enable_msix(struct tg3 *tp)
9271 int i, rc, cpus = num_online_cpus();
9272 struct msix_entry msix_ent[tp->irq_max];
9275 /* Just fall back to the simpler MSI mode. */
9279 * We want as many rx rings enabled as there are cpus.
9280 * The first MSIX vector only deals with link interrupts, etc.,
9281 * so we add one to the number of vectors we are requesting.
9283 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
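/* e.g. on a 4-CPU system with irq_max >= 5 this requests 5 vectors:
 * one for link/error events plus one per rx ring.
 */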
9285 for (i = 0; i < tp->irq_max; i++) {
9286 msix_ent[i].entry = i;
9287 msix_ent[i].vector = 0;
9290 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9293 } else if (rc != 0) {
9294 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9296 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9301 for (i = 0; i < tp->irq_max; i++)
9302 tp->napi[i].irq_vec = msix_ent[i].vector;
9304 netif_set_real_num_tx_queues(tp->dev, 1);
9305 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9306 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9307 pci_disable_msix(tp->pdev);
9311 if (tp->irq_cnt > 1) {
9312 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
9314 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9315 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9316 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
9317 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9324 static void tg3_ints_init(struct tg3 *tp)
9326 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
9327 !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
9328 /* All MSI supporting chips should support tagged
9329 * status. Assert that this is the case.
9331 netdev_warn(tp->dev,
9332 "MSI without TAGGED_STATUS? Not using MSI\n");
9336 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
9337 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
9338 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
9339 pci_enable_msi(tp->pdev) == 0)
9340 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
9342 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
9343 u32 msi_mode = tr32(MSGINT_MODE);
9344 if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
9345 tp->irq_cnt > 1)
9346 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9347 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9350 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
9352 tp->napi[0].irq_vec = tp->pdev->irq;
9353 netif_set_real_num_tx_queues(tp->dev, 1);
9354 netif_set_real_num_rx_queues(tp->dev, 1);
9358 static void tg3_ints_fini(struct tg3 *tp)
9360 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
9361 pci_disable_msix(tp->pdev);
9362 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
9363 pci_disable_msi(tp->pdev);
9364 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
9365 tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS);
9368 static int tg3_open(struct net_device *dev)
9370 struct tg3 *tp = netdev_priv(dev);
9373 if (tp->fw_needed) {
9374 err = tg3_request_firmware(tp);
9375 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9376 if (err)
9377 return err;
9378 } else if (err) {
9379 netdev_warn(tp->dev, "TSO capability disabled\n");
9380 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
9381 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9382 netdev_notice(tp->dev, "TSO capability restored\n");
9383 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
9387 netif_carrier_off(tp->dev);
9389 err = tg3_power_up(tp);
9393 tg3_full_lock(tp, 0);
9395 tg3_disable_ints(tp);
9396 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
9398 tg3_full_unlock(tp);
9401 * Set up interrupts first so we know how
9402 * many NAPI resources to allocate
9406 /* The placement of this call is tied
9407 * to the setup and use of Host TX descriptors.
9409 err = tg3_alloc_consistent(tp);
9415 tg3_napi_enable(tp);
9417 for (i = 0; i < tp->irq_cnt; i++) {
9418 struct tg3_napi *tnapi = &tp->napi[i];
9419 err = tg3_request_irq(tp, i);
9421 for (i--; i >= 0; i--) {
9422 tnapi = &tp->napi[i];
9423 free_irq(tnapi->irq_vec, tnapi);
9424 }
9430 tg3_full_lock(tp, 0);
9432 err = tg3_init_hw(tp, 1);
9434 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9437 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
9438 tp->timer_offset = HZ;
9440 tp->timer_offset = HZ / 10;
9442 BUG_ON(tp->timer_offset > HZ);
9443 tp->timer_counter = tp->timer_multiplier =
9444 (HZ / tp->timer_offset);
9445 tp->asf_counter = tp->asf_multiplier =
9446 ((HZ / tp->timer_offset) * 2);
9448 init_timer(&tp->timer);
9449 tp->timer.expires = jiffies + tp->timer_offset;
9450 tp->timer.data = (unsigned long) tp;
9451 tp->timer.function = tg3_timer;
9454 tg3_full_unlock(tp);
9459 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
9460 err = tg3_test_msi(tp);
9463 tg3_full_lock(tp, 0);
9464 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9466 tg3_full_unlock(tp);
9471 if (!(tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
9472 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
9473 u32 val = tr32(PCIE_TRANSACTION_CFG);
9475 tw32(PCIE_TRANSACTION_CFG,
9476 val | PCIE_TRANS_CFG_1SHOT_MSI);
9482 tg3_full_lock(tp, 0);
9484 add_timer(&tp->timer);
9485 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9486 tg3_enable_ints(tp);
9488 tg3_full_unlock(tp);
9490 netif_tx_start_all_queues(dev);
9495 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9496 struct tg3_napi *tnapi = &tp->napi[i];
9497 free_irq(tnapi->irq_vec, tnapi);
9501 tg3_napi_disable(tp);
9503 tg3_free_consistent(tp);
9510 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9511 struct rtnl_link_stats64 *);
9512 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9514 static int tg3_close(struct net_device *dev)
9517 struct tg3 *tp = netdev_priv(dev);
9519 tg3_napi_disable(tp);
9520 cancel_work_sync(&tp->reset_task);
9522 netif_tx_stop_all_queues(dev);
9524 del_timer_sync(&tp->timer);
9528 tg3_full_lock(tp, 1);
9530 tg3_disable_ints(tp);
9532 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9534 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
9536 tg3_full_unlock(tp);
9538 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9539 struct tg3_napi *tnapi = &tp->napi[i];
9540 free_irq(tnapi->irq_vec, tnapi);
9545 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9547 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9548 sizeof(tp->estats_prev));
9552 tg3_free_consistent(tp);
9556 netif_carrier_off(tp->dev);
9561 static inline u64 get_stat64(tg3_stat64_t *val)
9563 return ((u64)val->high << 32) | ((u64)val->low);
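/* e.g. high = 0x1, low = 0x5 combines to 0x0000000100000005. */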
9566 static u64 calc_crc_errors(struct tg3 *tp)
9568 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9570 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9571 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9572 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9575 spin_lock_bh(&tp->lock);
9576 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9577 tg3_writephy(tp, MII_TG3_TEST1,
9578 val | MII_TG3_TEST1_CRC_EN);
9579 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9582 spin_unlock_bh(&tp->lock);
9584 tp->phy_crc_errors += val;
9586 return tp->phy_crc_errors;
9589 return get_stat64(&hw_stats->rx_fcs_errors);
9592 #define ESTAT_ADD(member) \
9593 estats->member = old_estats->member + \
9594 get_stat64(&hw_stats->member)
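/* The hardware statistics block restarts from zero when the chip is
 * reset, so each ethtool counter is reported as the snapshot saved at
 * close time (tp->estats_prev, see tg3_close()) plus the live
 * hardware value; expanded, e.g.:
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 */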
9596 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9598 struct tg3_ethtool_stats *estats = &tp->estats;
9599 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9600 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9605 ESTAT_ADD(rx_octets);
9606 ESTAT_ADD(rx_fragments);
9607 ESTAT_ADD(rx_ucast_packets);
9608 ESTAT_ADD(rx_mcast_packets);
9609 ESTAT_ADD(rx_bcast_packets);
9610 ESTAT_ADD(rx_fcs_errors);
9611 ESTAT_ADD(rx_align_errors);
9612 ESTAT_ADD(rx_xon_pause_rcvd);
9613 ESTAT_ADD(rx_xoff_pause_rcvd);
9614 ESTAT_ADD(rx_mac_ctrl_rcvd);
9615 ESTAT_ADD(rx_xoff_entered);
9616 ESTAT_ADD(rx_frame_too_long_errors);
9617 ESTAT_ADD(rx_jabbers);
9618 ESTAT_ADD(rx_undersize_packets);
9619 ESTAT_ADD(rx_in_length_errors);
9620 ESTAT_ADD(rx_out_length_errors);
9621 ESTAT_ADD(rx_64_or_less_octet_packets);
9622 ESTAT_ADD(rx_65_to_127_octet_packets);
9623 ESTAT_ADD(rx_128_to_255_octet_packets);
9624 ESTAT_ADD(rx_256_to_511_octet_packets);
9625 ESTAT_ADD(rx_512_to_1023_octet_packets);
9626 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9627 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9628 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9629 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9630 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9632 ESTAT_ADD(tx_octets);
9633 ESTAT_ADD(tx_collisions);
9634 ESTAT_ADD(tx_xon_sent);
9635 ESTAT_ADD(tx_xoff_sent);
9636 ESTAT_ADD(tx_flow_control);
9637 ESTAT_ADD(tx_mac_errors);
9638 ESTAT_ADD(tx_single_collisions);
9639 ESTAT_ADD(tx_mult_collisions);
9640 ESTAT_ADD(tx_deferred);
9641 ESTAT_ADD(tx_excessive_collisions);
9642 ESTAT_ADD(tx_late_collisions);
9643 ESTAT_ADD(tx_collide_2times);
9644 ESTAT_ADD(tx_collide_3times);
9645 ESTAT_ADD(tx_collide_4times);
9646 ESTAT_ADD(tx_collide_5times);
9647 ESTAT_ADD(tx_collide_6times);
9648 ESTAT_ADD(tx_collide_7times);
9649 ESTAT_ADD(tx_collide_8times);
9650 ESTAT_ADD(tx_collide_9times);
9651 ESTAT_ADD(tx_collide_10times);
9652 ESTAT_ADD(tx_collide_11times);
9653 ESTAT_ADD(tx_collide_12times);
9654 ESTAT_ADD(tx_collide_13times);
9655 ESTAT_ADD(tx_collide_14times);
9656 ESTAT_ADD(tx_collide_15times);
9657 ESTAT_ADD(tx_ucast_packets);
9658 ESTAT_ADD(tx_mcast_packets);
9659 ESTAT_ADD(tx_bcast_packets);
9660 ESTAT_ADD(tx_carrier_sense_errors);
9661 ESTAT_ADD(tx_discards);
9662 ESTAT_ADD(tx_errors);
9664 ESTAT_ADD(dma_writeq_full);
9665 ESTAT_ADD(dma_write_prioq_full);
9666 ESTAT_ADD(rxbds_empty);
9667 ESTAT_ADD(rx_discards);
9668 ESTAT_ADD(rx_errors);
9669 ESTAT_ADD(rx_threshold_hit);
9671 ESTAT_ADD(dma_readq_full);
9672 ESTAT_ADD(dma_read_prioq_full);
9673 ESTAT_ADD(tx_comp_queue_full);
9675 ESTAT_ADD(ring_set_send_prod_index);
9676 ESTAT_ADD(ring_status_update);
9677 ESTAT_ADD(nic_irqs);
9678 ESTAT_ADD(nic_avoided_irqs);
9679 ESTAT_ADD(nic_tx_threshold_hit);
9684 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9685 struct rtnl_link_stats64 *stats)
9687 struct tg3 *tp = netdev_priv(dev);
9688 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9689 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9694 stats->rx_packets = old_stats->rx_packets +
9695 get_stat64(&hw_stats->rx_ucast_packets) +
9696 get_stat64(&hw_stats->rx_mcast_packets) +
9697 get_stat64(&hw_stats->rx_bcast_packets);
9699 stats->tx_packets = old_stats->tx_packets +
9700 get_stat64(&hw_stats->tx_ucast_packets) +
9701 get_stat64(&hw_stats->tx_mcast_packets) +
9702 get_stat64(&hw_stats->tx_bcast_packets);
9704 stats->rx_bytes = old_stats->rx_bytes +
9705 get_stat64(&hw_stats->rx_octets);
9706 stats->tx_bytes = old_stats->tx_bytes +
9707 get_stat64(&hw_stats->tx_octets);
9709 stats->rx_errors = old_stats->rx_errors +
9710 get_stat64(&hw_stats->rx_errors);
9711 stats->tx_errors = old_stats->tx_errors +
9712 get_stat64(&hw_stats->tx_errors) +
9713 get_stat64(&hw_stats->tx_mac_errors) +
9714 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9715 get_stat64(&hw_stats->tx_discards);
9717 stats->multicast = old_stats->multicast +
9718 get_stat64(&hw_stats->rx_mcast_packets);
9719 stats->collisions = old_stats->collisions +
9720 get_stat64(&hw_stats->tx_collisions);
9722 stats->rx_length_errors = old_stats->rx_length_errors +
9723 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9724 get_stat64(&hw_stats->rx_undersize_packets);
9726 stats->rx_over_errors = old_stats->rx_over_errors +
9727 get_stat64(&hw_stats->rxbds_empty);
9728 stats->rx_frame_errors = old_stats->rx_frame_errors +
9729 get_stat64(&hw_stats->rx_align_errors);
9730 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9731 get_stat64(&hw_stats->tx_discards);
9732 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9733 get_stat64(&hw_stats->tx_carrier_sense_errors);
9735 stats->rx_crc_errors = old_stats->rx_crc_errors +
9736 calc_crc_errors(tp);
9738 stats->rx_missed_errors = old_stats->rx_missed_errors +
9739 get_stat64(&hw_stats->rx_discards);
9741 stats->rx_dropped = tp->rx_dropped;
9746 static inline u32 calc_crc(unsigned char *buf, int len)
9754 for (j = 0; j < len; j++) {
9757 for (k = 0; k < 8; k++) {
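/* The hash is the standard bit-serial Ethernet CRC-32 (reflected
 * polynomial 0xedb88320, LSB first).  A minimal sketch of that
 * algorithm, for reference:
 *
 *	u32 reg = 0xffffffff, tmp;
 *	for (j = 0; j < len; j++) {
 *		reg ^= buf[j];
 *		for (k = 0; k < 8; k++) {
 *			tmp = reg & 0x01;
 *			reg >>= 1;
 *			if (tmp)
 *				reg ^= 0xedb88320;
 *		}
 *	}
 *	return ~reg;
 */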
9770 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9772 /* accept or reject all multicast frames */
9773 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9774 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9775 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9776 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9779 static void __tg3_set_rx_mode(struct net_device *dev)
9781 struct tg3 *tp = netdev_priv(dev);
9784 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9785 RX_MODE_KEEP_VLAN_TAG);
9787 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9788 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9789 * flag clear.
9790 */
9791 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9792 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9795 if (dev->flags & IFF_PROMISC) {
9796 /* Promiscuous mode. */
9797 rx_mode |= RX_MODE_PROMISC;
9798 } else if (dev->flags & IFF_ALLMULTI) {
9799 /* Accept all multicast. */
9800 tg3_set_multi(tp, 1);
9801 } else if (netdev_mc_empty(dev)) {
9802 /* Reject all multicast. */
9803 tg3_set_multi(tp, 0);
9805 /* Accept one or more multicast(s). */
9806 struct netdev_hw_addr *ha;
9807 u32 mc_filter[4] = { 0, };
9812 netdev_for_each_mc_addr(ha, dev) {
9813 crc = calc_crc(ha->addr, ETH_ALEN);
9814 bit = ~crc & 0x7f;
9815 regidx = (bit & 0x60) >> 5;
9816 bit &= 0x1f;
9817 mc_filter[regidx] |= (1 << bit);
9820 tw32(MAC_HASH_REG_0, mc_filter[0]);
9821 tw32(MAC_HASH_REG_1, mc_filter[1]);
9822 tw32(MAC_HASH_REG_2, mc_filter[2]);
9823 tw32(MAC_HASH_REG_3, mc_filter[3]);
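/* Worked example (hypothetical crc): calc_crc() returning 0x04c11db7
 * gives bit = ~crc & 0x7f = 0x48, regidx = (0x48 & 0x60) >> 5 = 2 and
 * bit &= 0x1f = 0x08, i.e. bit 8 of MAC_HASH_REG_2 is set for that
 * address.
 */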
9826 if (rx_mode != tp->rx_mode) {
9827 tp->rx_mode = rx_mode;
9828 tw32_f(MAC_RX_MODE, rx_mode);
9833 static void tg3_set_rx_mode(struct net_device *dev)
9835 struct tg3 *tp = netdev_priv(dev);
9837 if (!netif_running(dev))
9840 tg3_full_lock(tp, 0);
9841 __tg3_set_rx_mode(dev);
9842 tg3_full_unlock(tp);
9845 static int tg3_get_regs_len(struct net_device *dev)
9847 return TG3_REG_BLK_SIZE;
9850 static void tg3_get_regs(struct net_device *dev,
9851 struct ethtool_regs *regs, void *_p)
9853 struct tg3 *tp = netdev_priv(dev);
9857 memset(_p, 0, TG3_REG_BLK_SIZE);
9859 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9862 tg3_full_lock(tp, 0);
9864 tg3_dump_legacy_regs(tp, (u32 *)_p);
9866 tg3_full_unlock(tp);
9869 static int tg3_get_eeprom_len(struct net_device *dev)
9871 struct tg3 *tp = netdev_priv(dev);
9873 return tp->nvram_size;
9876 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9878 struct tg3 *tp = netdev_priv(dev);
9881 u32 i, offset, len, b_offset, b_count;
9884 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9887 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9890 offset = eeprom->offset;
9894 eeprom->magic = TG3_EEPROM_MAGIC;
9897 /* adjustments to start on required 4 byte boundary */
9898 b_offset = offset & 3;
9899 b_count = 4 - b_offset;
9900 if (b_count > len) {
9901 /* i.e. offset=1 len=2 */
9904 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9907 memcpy(data, ((char *)&val) + b_offset, b_count);
9910 eeprom->len += b_count;
9913 /* read bytes up to the last 4 byte boundary */
9914 pd = &data[eeprom->len];
9915 for (i = 0; i < (len - (len & 3)); i += 4) {
9916 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9921 memcpy(pd + i, &val, 4);
9926 /* read last bytes not ending on 4 byte boundary */
9927 pd = &data[eeprom->len];
9928 b_count = len & 3;
9929 b_offset = offset + len - b_count;
9930 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9933 memcpy(pd, &val, b_count);
9934 eeprom->len += b_count;
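/* Worked example of the unaligned start handling above: offset = 1,
 * len = 2 (the "i.e." case noted earlier) gives b_offset = 1 and
 * b_count = 4 - 1 = 3, clamped to len = 2; the aligned word at
 * offset 0 is read and its bytes 1..2 are copied out.
 */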
9939 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9941 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9943 struct tg3 *tp = netdev_priv(dev);
9945 u32 offset, len, b_offset, odd_len;
9949 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9952 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
9953 eeprom->magic != TG3_EEPROM_MAGIC)
9956 offset = eeprom->offset;
9959 if ((b_offset = (offset & 3))) {
9960 /* adjustments to start on required 4 byte boundary */
9961 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9972 /* adjustments to end on required 4 byte boundary */
9974 len = (len + 3) & ~3;
9975 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9981 if (b_offset || odd_len) {
9982 buf = kmalloc(len, GFP_KERNEL);
9985 if (b_offset)
9986 memcpy(buf, &start, 4);
9987 if (odd_len)
9988 memcpy(buf+len-4, &end, 4);
9989 memcpy(buf + b_offset, data, eeprom->len);
9992 ret = tg3_nvram_write_block(tp, offset, len, buf);
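/* The write path above is a read-modify-write: e.g. a 6-byte write at
 * offset 5 straddles unaligned word boundaries, so the first and last
 * aligned words are read into 'start' and 'end', patched with the
 * caller's bytes in the bounce buffer, and the whole aligned span is
 * written back in one tg3_nvram_write_block() call.
 */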
10000 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10002 struct tg3 *tp = netdev_priv(dev);
10004 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10005 struct phy_device *phydev;
10006 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10008 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10009 return phy_ethtool_gset(phydev, cmd);
10012 cmd->supported = (SUPPORTED_Autoneg);
10014 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10015 cmd->supported |= (SUPPORTED_1000baseT_Half |
10016 SUPPORTED_1000baseT_Full);
10018 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10019 cmd->supported |= (SUPPORTED_100baseT_Half |
10020 SUPPORTED_100baseT_Full |
10021 SUPPORTED_10baseT_Half |
10022 SUPPORTED_10baseT_Full |
10024 cmd->port = PORT_TP;
10026 cmd->supported |= SUPPORTED_FIBRE;
10027 cmd->port = PORT_FIBRE;
10030 cmd->advertising = tp->link_config.advertising;
10031 if (netif_running(dev)) {
10032 cmd->speed = tp->link_config.active_speed;
10033 cmd->duplex = tp->link_config.active_duplex;
10035 cmd->speed = SPEED_INVALID;
10036 cmd->duplex = DUPLEX_INVALID;
10038 cmd->phy_address = tp->phy_addr;
10039 cmd->transceiver = XCVR_INTERNAL;
10040 cmd->autoneg = tp->link_config.autoneg;
10046 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10048 struct tg3 *tp = netdev_priv(dev);
10050 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10051 struct phy_device *phydev;
10052 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10054 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10055 return phy_ethtool_sset(phydev, cmd);
10058 if (cmd->autoneg != AUTONEG_ENABLE &&
10059 cmd->autoneg != AUTONEG_DISABLE)
10062 if (cmd->autoneg == AUTONEG_DISABLE &&
10063 cmd->duplex != DUPLEX_FULL &&
10064 cmd->duplex != DUPLEX_HALF)
10067 if (cmd->autoneg == AUTONEG_ENABLE) {
10068 u32 mask = ADVERTISED_Autoneg |
10070 ADVERTISED_Asym_Pause;
10072 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10073 mask |= ADVERTISED_1000baseT_Half |
10074 ADVERTISED_1000baseT_Full;
10076 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10077 mask |= ADVERTISED_100baseT_Half |
10078 ADVERTISED_100baseT_Full |
10079 ADVERTISED_10baseT_Half |
10080 ADVERTISED_10baseT_Full |
10083 mask |= ADVERTISED_FIBRE;
10085 if (cmd->advertising & ~mask)
10088 mask &= (ADVERTISED_1000baseT_Half |
10089 ADVERTISED_1000baseT_Full |
10090 ADVERTISED_100baseT_Half |
10091 ADVERTISED_100baseT_Full |
10092 ADVERTISED_10baseT_Half |
10093 ADVERTISED_10baseT_Full);
10095 cmd->advertising &= mask;
10097 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10098 if (cmd->speed != SPEED_1000)
10101 if (cmd->duplex != DUPLEX_FULL)
10104 if (cmd->speed != SPEED_100 &&
10105 cmd->speed != SPEED_10)
10110 tg3_full_lock(tp, 0);
10112 tp->link_config.autoneg = cmd->autoneg;
10113 if (cmd->autoneg == AUTONEG_ENABLE) {
10114 tp->link_config.advertising = (cmd->advertising |
10115 ADVERTISED_Autoneg);
10116 tp->link_config.speed = SPEED_INVALID;
10117 tp->link_config.duplex = DUPLEX_INVALID;
10119 tp->link_config.advertising = 0;
10120 tp->link_config.speed = cmd->speed;
10121 tp->link_config.duplex = cmd->duplex;
10124 tp->link_config.orig_speed = tp->link_config.speed;
10125 tp->link_config.orig_duplex = tp->link_config.duplex;
10126 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10128 if (netif_running(dev))
10129 tg3_setup_phy(tp, 1);
10131 tg3_full_unlock(tp);
10136 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10138 struct tg3 *tp = netdev_priv(dev);
10140 strcpy(info->driver, DRV_MODULE_NAME);
10141 strcpy(info->version, DRV_MODULE_VERSION);
10142 strcpy(info->fw_version, tp->fw_ver);
10143 strcpy(info->bus_info, pci_name(tp->pdev));
10146 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10148 struct tg3 *tp = netdev_priv(dev);
10150 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
10151 device_can_wakeup(&tp->pdev->dev))
10152 wol->supported = WAKE_MAGIC;
10154 wol->supported = 0;
10156 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
10157 device_can_wakeup(&tp->pdev->dev))
10158 wol->wolopts = WAKE_MAGIC;
10159 memset(&wol->sopass, 0, sizeof(wol->sopass));
10162 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10164 struct tg3 *tp = netdev_priv(dev);
10165 struct device *dp = &tp->pdev->dev;
10167 if (wol->wolopts & ~WAKE_MAGIC)
10169 if ((wol->wolopts & WAKE_MAGIC) &&
10170 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
10173 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10175 spin_lock_bh(&tp->lock);
10176 if (device_may_wakeup(dp))
10177 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10179 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10180 spin_unlock_bh(&tp->lock);
10186 static u32 tg3_get_msglevel(struct net_device *dev)
10188 struct tg3 *tp = netdev_priv(dev);
10189 return tp->msg_enable;
10192 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10194 struct tg3 *tp = netdev_priv(dev);
10195 tp->msg_enable = value;
10198 static int tg3_nway_reset(struct net_device *dev)
10200 struct tg3 *tp = netdev_priv(dev);
10203 if (!netif_running(dev))
10206 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10209 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10210 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10212 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10216 spin_lock_bh(&tp->lock);
10218 tg3_readphy(tp, MII_BMCR, &bmcr);
10219 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10220 ((bmcr & BMCR_ANENABLE) ||
10221 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10222 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10226 spin_unlock_bh(&tp->lock);
10232 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10234 struct tg3 *tp = netdev_priv(dev);
10236 ering->rx_max_pending = tp->rx_std_ring_mask;
10237 ering->rx_mini_max_pending = 0;
10238 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
10239 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10241 ering->rx_jumbo_max_pending = 0;
10243 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10245 ering->rx_pending = tp->rx_pending;
10246 ering->rx_mini_pending = 0;
10247 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
10248 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10250 ering->rx_jumbo_pending = 0;
10252 ering->tx_pending = tp->napi[0].tx_pending;
10255 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10257 struct tg3 *tp = netdev_priv(dev);
10258 int i, irq_sync = 0, err = 0;
10260 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10261 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10262 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10263 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10264 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
10265 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10268 if (netif_running(dev)) {
10270 tg3_netif_stop(tp);
10274 tg3_full_lock(tp, irq_sync);
10276 tp->rx_pending = ering->rx_pending;
10278 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
10279 tp->rx_pending > 63)
10280 tp->rx_pending = 63;
10281 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10283 for (i = 0; i < tp->irq_max; i++)
10284 tp->napi[i].tx_pending = ering->tx_pending;
10286 if (netif_running(dev)) {
10287 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10288 err = tg3_restart_hw(tp, 1);
10290 tg3_netif_start(tp);
10293 tg3_full_unlock(tp);
10295 if (irq_sync && !err)
10301 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10303 struct tg3 *tp = netdev_priv(dev);
10305 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
10307 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10308 epause->rx_pause = 1;
10310 epause->rx_pause = 0;
10312 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10313 epause->tx_pause = 1;
10315 epause->tx_pause = 0;
10318 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10320 struct tg3 *tp = netdev_priv(dev);
10323 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10325 struct phy_device *phydev;
10327 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10329 if (!(phydev->supported & SUPPORTED_Pause) ||
10330 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10331 (epause->rx_pause != epause->tx_pause)))
10334 tp->link_config.flowctrl = 0;
10335 if (epause->rx_pause) {
10336 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10338 if (epause->tx_pause) {
10339 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10340 newadv = ADVERTISED_Pause;
10342 newadv = ADVERTISED_Pause |
10343 ADVERTISED_Asym_Pause;
10344 } else if (epause->tx_pause) {
10345 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10346 newadv = ADVERTISED_Asym_Pause;
10350 if (epause->autoneg)
10351 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10353 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
10355 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10356 u32 oldadv = phydev->advertising &
10357 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10358 if (oldadv != newadv) {
10359 phydev->advertising &=
10360 ~(ADVERTISED_Pause |
10361 ADVERTISED_Asym_Pause);
10362 phydev->advertising |= newadv;
10363 if (phydev->autoneg) {
10365 * Always renegotiate the link to
10366 * inform our link partner of our
10367 * flow control settings, even if the
10368 * flow control is forced. Let
10369 * tg3_adjust_link() do the final
10370 * flow control setup.
10372 return phy_start_aneg(phydev);
10376 if (!epause->autoneg)
10377 tg3_setup_flow_control(tp, 0, 0);
10379 tp->link_config.orig_advertising &=
10380 ~(ADVERTISED_Pause |
10381 ADVERTISED_Asym_Pause);
10382 tp->link_config.orig_advertising |= newadv;
10387 if (netif_running(dev)) {
10388 tg3_netif_stop(tp);
10392 tg3_full_lock(tp, irq_sync);
10394 if (epause->autoneg)
10395 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10397 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
10398 if (epause->rx_pause)
10399 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10401 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10402 if (epause->tx_pause)
10403 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10405 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10407 if (netif_running(dev)) {
10408 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10409 err = tg3_restart_hw(tp, 1);
10411 tg3_netif_start(tp);
10414 tg3_full_unlock(tp);
10420 static int tg3_get_sset_count(struct net_device *dev, int sset)
10422 switch (sset) {
10423 case ETH_SS_TEST:
10424 return TG3_NUM_TEST;
10425 case ETH_SS_STATS:
10426 return TG3_NUM_STATS;
10427 default:
10428 return -EOPNOTSUPP;
10429 }
10432 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10434 switch (stringset) {
10436 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10439 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10442 WARN_ON(1); /* we need a WARN() */
10447 static int tg3_set_phys_id(struct net_device *dev,
10448 enum ethtool_phys_id_state state)
10450 struct tg3 *tp = netdev_priv(dev);
10452 if (!netif_running(tp->dev))
10456 case ETHTOOL_ID_ACTIVE:
10457 return 1; /* cycle on/off once per second */
10459 case ETHTOOL_ID_ON:
10460 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10461 LED_CTRL_1000MBPS_ON |
10462 LED_CTRL_100MBPS_ON |
10463 LED_CTRL_10MBPS_ON |
10464 LED_CTRL_TRAFFIC_OVERRIDE |
10465 LED_CTRL_TRAFFIC_BLINK |
10466 LED_CTRL_TRAFFIC_LED);
10469 case ETHTOOL_ID_OFF:
10470 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10471 LED_CTRL_TRAFFIC_OVERRIDE);
10474 case ETHTOOL_ID_INACTIVE:
10475 tw32(MAC_LED_CTRL, tp->led_ctrl);
10482 static void tg3_get_ethtool_stats(struct net_device *dev,
10483 struct ethtool_stats *estats, u64 *tmp_stats)
10485 struct tg3 *tp = netdev_priv(dev);
10486 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10489 static __be32 *tg3_vpd_readblock(struct tg3 *tp)
10493 u32 offset = 0, len = 0;
10496 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
10497 tg3_nvram_read(tp, 0, &magic))
10500 if (magic == TG3_EEPROM_MAGIC) {
10501 for (offset = TG3_NVM_DIR_START;
10502 offset < TG3_NVM_DIR_END;
10503 offset += TG3_NVM_DIRENT_SIZE) {
10504 if (tg3_nvram_read(tp, offset, &val))
10507 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10508 TG3_NVM_DIRTYPE_EXTVPD)
10512 if (offset != TG3_NVM_DIR_END) {
10513 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10514 if (tg3_nvram_read(tp, offset + 4, &offset))
10517 offset = tg3_nvram_logical_addr(tp, offset);
10521 if (!offset || !len) {
10522 offset = TG3_NVM_VPD_OFF;
10523 len = TG3_NVM_VPD_LEN;
10526 buf = kmalloc(len, GFP_KERNEL);
10530 if (magic == TG3_EEPROM_MAGIC) {
10531 for (i = 0; i < len; i += 4) {
10532 /* The data is in little-endian format in NVRAM.
10533 * Use the big-endian read routines to preserve
10534 * the byte order as it exists in NVRAM.
10536 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10542 unsigned int pos = 0;
10544 ptr = (u8 *)&buf[0];
10545 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10546 cnt = pci_read_vpd(tp->pdev, pos,
10548 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10564 #define NVRAM_TEST_SIZE 0x100
10565 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10566 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10567 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10568 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10569 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10571 static int tg3_test_nvram(struct tg3 *tp)
10575 int i, j, k, err = 0, size;
10577 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
10580 if (tg3_nvram_read(tp, 0, &magic) != 0)
10583 if (magic == TG3_EEPROM_MAGIC)
10584 size = NVRAM_TEST_SIZE;
10585 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10586 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10587 TG3_EEPROM_SB_FORMAT_1) {
10588 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10589 case TG3_EEPROM_SB_REVISION_0:
10590 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10592 case TG3_EEPROM_SB_REVISION_2:
10593 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10595 case TG3_EEPROM_SB_REVISION_3:
10596 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10603 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10604 size = NVRAM_SELFBOOT_HW_SIZE;
10608 buf = kmalloc(size, GFP_KERNEL);
10613 for (i = 0, j = 0; i < size; i += 4, j++) {
10614 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10621 /* Selfboot format */
10622 magic = be32_to_cpu(buf[0]);
10623 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10624 TG3_EEPROM_MAGIC_FW) {
10625 u8 *buf8 = (u8 *) buf, csum8 = 0;
10627 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10628 TG3_EEPROM_SB_REVISION_2) {
10629 /* For rev 2, the csum doesn't include the MBA. */
10630 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10632 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10635 for (i = 0; i < size; i++)
10648 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10649 TG3_EEPROM_MAGIC_HW) {
10650 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10651 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10652 u8 *buf8 = (u8 *) buf;
10654 /* Separate the parity bits and the data bytes. */
10655 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10656 if ((i == 0) || (i == 8)) {
10660 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10661 parity[k++] = buf8[i] & msk;
10663 } else if (i == 16) {
10667 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10668 parity[k++] = buf8[i] & msk;
10671 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10672 parity[k++] = buf8[i] & msk;
10675 data[j++] = buf8[i];
10679 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10680 u8 hw8 = hweight8(data[i]);
10682 if ((hw8 & 0x1) && parity[i])
10684 else if (!(hw8 & 0x1) && !parity[i])
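/* The scheme is odd parity per data byte: the stored parity bit must
 * be set exactly when the byte has an even number of one bits, so the
 * truth values of (hweight8(data[i]) & 1) and parity[i] must differ.
 * e.g. a data byte of 0x03 (two bits set) requires its parity bit set.
 */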
10693 /* Bootstrap checksum at offset 0x10 */
10694 csum = calc_crc((unsigned char *) buf, 0x10);
10695 if (csum != le32_to_cpu(buf[0x10/4]))
10698 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10699 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10700 if (csum != le32_to_cpu(buf[0xfc/4]))
10705 buf = tg3_vpd_readblock(tp);
10709 i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10710 PCI_VPD_LRDT_RO_DATA);
10712 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10716 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10719 i += PCI_VPD_LRDT_TAG_SIZE;
10720 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10721 PCI_VPD_RO_KEYWORD_CHKSUM);
10725 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10727 for (i = 0; i <= j; i++)
10728 csum8 += ((u8 *)buf)[i];
10742 #define TG3_SERDES_TIMEOUT_SEC 2
10743 #define TG3_COPPER_TIMEOUT_SEC 6
10745 static int tg3_test_link(struct tg3 *tp)
10749 if (!netif_running(tp->dev))
10752 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10753 max = TG3_SERDES_TIMEOUT_SEC;
10755 max = TG3_COPPER_TIMEOUT_SEC;
10757 for (i = 0; i < max; i++) {
10758 if (netif_carrier_ok(tp->dev))
10761 if (msleep_interruptible(1000))
10768 /* Only test the commonly used registers */
10769 static int tg3_test_registers(struct tg3 *tp)
10771 int i, is_5705, is_5750;
10772 u32 offset, read_mask, write_mask, val, save_val, read_val;
10776 #define TG3_FL_5705 0x1
10777 #define TG3_FL_NOT_5705 0x2
10778 #define TG3_FL_NOT_5788 0x4
10779 #define TG3_FL_NOT_5750 0x8
10783 /* MAC Control Registers */
10784 { MAC_MODE, TG3_FL_NOT_5705,
10785 0x00000000, 0x00ef6f8c },
10786 { MAC_MODE, TG3_FL_5705,
10787 0x00000000, 0x01ef6b8c },
10788 { MAC_STATUS, TG3_FL_NOT_5705,
10789 0x03800107, 0x00000000 },
10790 { MAC_STATUS, TG3_FL_5705,
10791 0x03800100, 0x00000000 },
10792 { MAC_ADDR_0_HIGH, 0x0000,
10793 0x00000000, 0x0000ffff },
10794 { MAC_ADDR_0_LOW, 0x0000,
10795 0x00000000, 0xffffffff },
10796 { MAC_RX_MTU_SIZE, 0x0000,
10797 0x00000000, 0x0000ffff },
10798 { MAC_TX_MODE, 0x0000,
10799 0x00000000, 0x00000070 },
10800 { MAC_TX_LENGTHS, 0x0000,
10801 0x00000000, 0x00003fff },
10802 { MAC_RX_MODE, TG3_FL_NOT_5705,
10803 0x00000000, 0x000007fc },
10804 { MAC_RX_MODE, TG3_FL_5705,
10805 0x00000000, 0x000007dc },
10806 { MAC_HASH_REG_0, 0x0000,
10807 0x00000000, 0xffffffff },
10808 { MAC_HASH_REG_1, 0x0000,
10809 0x00000000, 0xffffffff },
10810 { MAC_HASH_REG_2, 0x0000,
10811 0x00000000, 0xffffffff },
10812 { MAC_HASH_REG_3, 0x0000,
10813 0x00000000, 0xffffffff },
10815 /* Receive Data and Receive BD Initiator Control Registers. */
10816 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10817 0x00000000, 0xffffffff },
10818 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10819 0x00000000, 0xffffffff },
10820 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10821 0x00000000, 0x00000003 },
10822 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10823 0x00000000, 0xffffffff },
10824 { RCVDBDI_STD_BD+0, 0x0000,
10825 0x00000000, 0xffffffff },
10826 { RCVDBDI_STD_BD+4, 0x0000,
10827 0x00000000, 0xffffffff },
10828 { RCVDBDI_STD_BD+8, 0x0000,
10829 0x00000000, 0xffff0002 },
10830 { RCVDBDI_STD_BD+0xc, 0x0000,
10831 0x00000000, 0xffffffff },
10833 /* Receive BD Initiator Control Registers. */
10834 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10835 0x00000000, 0xffffffff },
10836 { RCVBDI_STD_THRESH, TG3_FL_5705,
10837 0x00000000, 0x000003ff },
10838 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10839 0x00000000, 0xffffffff },
10841 /* Host Coalescing Control Registers. */
10842 { HOSTCC_MODE, TG3_FL_NOT_5705,
10843 0x00000000, 0x00000004 },
10844 { HOSTCC_MODE, TG3_FL_5705,
10845 0x00000000, 0x000000f6 },
10846 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10847 0x00000000, 0xffffffff },
10848 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10849 0x00000000, 0x000003ff },
10850 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10851 0x00000000, 0xffffffff },
10852 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10853 0x00000000, 0x000003ff },
10854 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10855 0x00000000, 0xffffffff },
10856 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10857 0x00000000, 0x000000ff },
10858 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10859 0x00000000, 0xffffffff },
10860 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10861 0x00000000, 0x000000ff },
10862 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10863 0x00000000, 0xffffffff },
10864 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10865 0x00000000, 0xffffffff },
10866 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10867 0x00000000, 0xffffffff },
10868 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10869 0x00000000, 0x000000ff },
10870 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10871 0x00000000, 0xffffffff },
10872 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10873 0x00000000, 0x000000ff },
10874 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10875 0x00000000, 0xffffffff },
10876 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10877 0x00000000, 0xffffffff },
10878 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10879 0x00000000, 0xffffffff },
10880 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10881 0x00000000, 0xffffffff },
10882 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10883 0x00000000, 0xffffffff },
10884 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10885 0xffffffff, 0x00000000 },
10886 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10887 0xffffffff, 0x00000000 },
10889 /* Buffer Manager Control Registers. */
10890 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10891 0x00000000, 0x007fff80 },
10892 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10893 0x00000000, 0x007fffff },
10894 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10895 0x00000000, 0x0000003f },
10896 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10897 0x00000000, 0x000001ff },
10898 { BUFMGR_MB_HIGH_WATER, 0x0000,
10899 0x00000000, 0x000001ff },
10900 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10901 0xffffffff, 0x00000000 },
10902 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10903 0xffffffff, 0x00000000 },
10905 /* Mailbox Registers */
10906 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10907 0x00000000, 0x000001ff },
10908 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10909 0x00000000, 0x000001ff },
10910 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10911 0x00000000, 0x000007ff },
10912 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10913 0x00000000, 0x000001ff },
10915 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10918 is_5705 = is_5750 = 0;
10919 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10920 is_5705 = 1;
10921 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10922 is_5750 = 1;
10923 }
10925 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10926 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10929 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10932 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10933 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10936 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10939 offset = (u32) reg_tbl[i].offset;
10940 read_mask = reg_tbl[i].read_mask;
10941 write_mask = reg_tbl[i].write_mask;
10943 /* Save the original register content */
10944 save_val = tr32(offset);
10946 /* Determine the read-only value. */
10947 read_val = save_val & read_mask;
10949 /* Write zero to the register, then make sure the read-only bits
10950 * are not changed and the read/write bits are all zeros.
10951 */
10952 tw32(offset, 0);
10954 val = tr32(offset);
10956 /* Test the read-only and read/write bits. */
10957 if (((val & read_mask) != read_val) || (val & write_mask))
10960 /* Write ones to all the bits defined by RdMask and WrMask, then
10961 * make sure the read-only bits are not changed and the
10962 * read/write bits are all ones.
10964 tw32(offset, read_mask | write_mask);
10966 val = tr32(offset);
10968 /* Test the read-only bits. */
10969 if ((val & read_mask) != read_val)
10972 /* Test the read/write bits. */
10973 if ((val & write_mask) != write_mask)
10976 tw32(offset, save_val);
10982 if (netif_msg_hw(tp))
10983 netdev_err(tp->dev,
10984 "Register test failed at offset %x\n", offset);
10985 tw32(offset, save_val);
10989 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10991 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
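/* Classic write/read-back pattern test: all-zeros, all-ones and the
 * alternating pattern 0xaa55a55a together catch stuck-at bits and
 * help expose coupling faults between adjacent lines in the tested
 * SRAM window.
 */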
10995 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10996 for (j = 0; j < len; j += 4) {
10999 tg3_write_mem(tp, offset + j, test_pattern[i]);
11000 tg3_read_mem(tp, offset + j, &val);
11001 if (val != test_pattern[i])
11008 static int tg3_test_memory(struct tg3 *tp)
11010 static struct mem_entry {
11013 } mem_tbl_570x[] = {
11014 { 0x00000000, 0x00b50},
11015 { 0x00002000, 0x1c000},
11016 { 0xffffffff, 0x00000}
11017 }, mem_tbl_5705[] = {
11018 { 0x00000100, 0x0000c},
11019 { 0x00000200, 0x00008},
11020 { 0x00004000, 0x00800},
11021 { 0x00006000, 0x01000},
11022 { 0x00008000, 0x02000},
11023 { 0x00010000, 0x0e000},
11024 { 0xffffffff, 0x00000}
11025 }, mem_tbl_5755[] = {
11026 { 0x00000200, 0x00008},
11027 { 0x00004000, 0x00800},
11028 { 0x00006000, 0x00800},
11029 { 0x00008000, 0x02000},
11030 { 0x00010000, 0x0c000},
11031 { 0xffffffff, 0x00000}
11032 }, mem_tbl_5906[] = {
11033 { 0x00000200, 0x00008},
11034 { 0x00004000, 0x00400},
11035 { 0x00006000, 0x00400},
11036 { 0x00008000, 0x01000},
11037 { 0x00010000, 0x01000},
11038 { 0xffffffff, 0x00000}
11039 }, mem_tbl_5717[] = {
11040 { 0x00000200, 0x00008},
11041 { 0x00010000, 0x0a000},
11042 { 0x00020000, 0x13c00},
11043 { 0xffffffff, 0x00000}
11044 }, mem_tbl_57765[] = {
11045 { 0x00000200, 0x00008},
11046 { 0x00004000, 0x00800},
11047 { 0x00006000, 0x09800},
11048 { 0x00010000, 0x0a000},
11049 { 0xffffffff, 0x00000}
11051 struct mem_entry *mem_tbl;
11055 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
11056 mem_tbl = mem_tbl_5717;
11057 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11058 mem_tbl = mem_tbl_57765;
11059 else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
11060 mem_tbl = mem_tbl_5755;
11061 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11062 mem_tbl = mem_tbl_5906;
11063 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
11064 mem_tbl = mem_tbl_5705;
11066 mem_tbl = mem_tbl_570x;
11068 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11069 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11077 #define TG3_MAC_LOOPBACK 0
11078 #define TG3_PHY_LOOPBACK 1
11079 #define TG3_TSO_LOOPBACK 2
11081 #define TG3_TSO_MSS 500
11083 #define TG3_TSO_IP_HDR_LEN 20
11084 #define TG3_TSO_TCP_HDR_LEN 20
11085 #define TG3_TSO_TCP_OPT_LEN 12
11087 static const u8 tg3_tso_header[] = {
11089 0x45, 0x00, 0x00, 0x00,
11090 0x00, 0x00, 0x40, 0x00,
11091 0x40, 0x06, 0x00, 0x00,
11092 0x0a, 0x00, 0x00, 0x01,
11093 0x0a, 0x00, 0x00, 0x02,
11094 0x0d, 0x00, 0xe0, 0x00,
11095 0x00, 0x00, 0x01, 0x00,
11096 0x00, 0x00, 0x02, 0x00,
11097 0x80, 0x10, 0x10, 0x00,
11098 0x14, 0x09, 0x00, 0x00,
11099 0x01, 0x01, 0x08, 0x0a,
11100 0x11, 0x11, 0x11, 0x11,
11101 0x11, 0x11, 0x11, 0x11,
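/* Decoded, the canned header above is an IPv4 header (version/IHL
 * 0x45, DF set, TTL 0x40, protocol 0x06 = TCP, src 10.0.0.1, dst
 * 10.0.0.2) followed by a TCP header whose data offset of 8 words
 * covers a 12-byte option block (NOP, NOP, timestamp), matching
 * TG3_TSO_IP_HDR_LEN, TG3_TSO_TCP_HDR_LEN and TG3_TSO_TCP_OPT_LEN.
 */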
11104 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11106 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11107 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11108 struct sk_buff *skb, *rx_skb;
11111 int num_pkts, tx_len, rx_len, i, err;
11112 struct tg3_rx_buffer_desc *desc;
11113 struct tg3_napi *tnapi, *rnapi;
11114 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11116 tnapi = &tp->napi[0];
11117 rnapi = &tp->napi[0];
11118 if (tp->irq_cnt > 1) {
11119 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
11120 rnapi = &tp->napi[1];
11121 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
11122 tnapi = &tp->napi[1];
11124 coal_now = tnapi->coal_now | rnapi->coal_now;
11126 if (loopback_mode == TG3_MAC_LOOPBACK) {
11127 /* HW errata - mac loopback fails in some cases on 5780.
11128 * Normal traffic and PHY loopback are not affected by
11129 * the errata. Also, the MAC loopback test is deprecated for
11130 * all newer ASIC revisions.
11132 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11133 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
11136 mac_mode = tp->mac_mode &
11137 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11138 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11139 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11140 mac_mode |= MAC_MODE_LINK_POLARITY;
11141 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11142 mac_mode |= MAC_MODE_PORT_MODE_MII;
11144 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11145 tw32(MAC_MODE, mac_mode);
11147 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11148 tg3_phy_fet_toggle_apd(tp, false);
11149 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11151 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11153 tg3_phy_toggle_automdix(tp, 0);
11155 tg3_writephy(tp, MII_BMCR, val);
11158 mac_mode = tp->mac_mode &
11159 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11160 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11161 tg3_writephy(tp, MII_TG3_FET_PTEST,
11162 MII_TG3_FET_PTEST_FRC_TX_LINK |
11163 MII_TG3_FET_PTEST_FRC_TX_LOCK);
11164 /* The write needs to be flushed for the AC131 */
11165 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11166 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11167 mac_mode |= MAC_MODE_PORT_MODE_MII;
11169 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11171 /* reset to prevent losing 1st rx packet intermittently */
11172 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11173 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11175 tw32_f(MAC_RX_MODE, tp->rx_mode);
11177 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11178 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11179 if (masked_phy_id == TG3_PHY_ID_BCM5401)
11180 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11181 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11182 mac_mode |= MAC_MODE_LINK_POLARITY;
11183 tg3_writephy(tp, MII_TG3_EXT_CTRL,
11184 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11186 tw32(MAC_MODE, mac_mode);
11188 /* Wait for link */
11189 for (i = 0; i < 100; i++) {
11190 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11199 skb = netdev_alloc_skb(tp->dev, tx_len);
11203 tx_data = skb_put(skb, tx_len);
11204 memcpy(tx_data, tp->dev->dev_addr, 6);
11205 memset(tx_data + 6, 0x0, 8);
11207 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11209 if (loopback_mode == TG3_TSO_LOOPBACK) {
11210 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11212 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11213 TG3_TSO_TCP_OPT_LEN;
11215 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11216 sizeof(tg3_tso_header));
11219 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11220 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11222 /* Set the total length field in the IP header */
11223 iph->tot_len = htons((u16)(mss + hdr_len));
11225 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11226 TXD_FLAG_CPU_POST_DMA);
11228 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11230 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11231 th = (struct tcphdr *)&tx_data[val];
11234 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11236 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
11237 mss |= (hdr_len & 0xc) << 12;
11238 if (hdr_len & 0x10)
11239 base_flags |= 0x00000010;
11240 base_flags |= (hdr_len & 0x3e0) << 5;
11241 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
11242 mss |= hdr_len << 9;
11243 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
11244 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11245 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11247 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11250 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11253 data_off = ETH_HLEN;
11256 for (i = data_off; i < tx_len; i++)
11257 tx_data[i] = (u8) (i & 0xff);
11259 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11260 if (pci_dma_mapping_error(tp->pdev, map)) {
11261 dev_kfree_skb(skb);
11265 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11270 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11272 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11273 base_flags, (mss << 1) | 1);
11277 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11278 tr32_mailbox(tnapi->prodmbox);
11282 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11283 for (i = 0; i < 35; i++) {
11284 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11289 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11290 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11291 if ((tx_idx == tnapi->tx_prod) &&
11292 (rx_idx == (rx_start_idx + num_pkts)))
11296 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11297 dev_kfree_skb(skb);
11299 if (tx_idx != tnapi->tx_prod)
11302 if (rx_idx != rx_start_idx + num_pkts)
11306 while (rx_idx != rx_start_idx) {
11307 desc = &rnapi->rx_rcb[rx_start_idx++];
11308 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11309 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11311 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11312 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11315 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11318 if (loopback_mode != TG3_TSO_LOOPBACK) {
11319 if (rx_len != tx_len)
11322 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11323 if (opaque_key != RXD_OPAQUE_RING_STD)
11326 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11329 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11330 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11331 >> RXD_TCPCSUM_SHIFT == 0xffff) {
11335 if (opaque_key == RXD_OPAQUE_RING_STD) {
11336 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11337 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11339 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11340 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11341 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11346 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11347 PCI_DMA_FROMDEVICE);
11349 for (i = data_off; i < rx_len; i++, val++) {
11350 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11357 /* tg3_free_rings will unmap and free the rx_skb */
11362 #define TG3_STD_LOOPBACK_FAILED 1
11363 #define TG3_JMB_LOOPBACK_FAILED 2
11364 #define TG3_TSO_LOOPBACK_FAILED 4
11366 #define TG3_MAC_LOOPBACK_SHIFT 0
11367 #define TG3_PHY_LOOPBACK_SHIFT 4
11368 #define TG3_LOOPBACK_FAILED 0x00000077
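/* All three failure bits replicated in both the MAC (shift 0) and
 * PHY (shift 4) nibbles: (7 << 0) | (7 << 4) = 0x77.
 */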
11370 static int tg3_test_loopback(struct tg3 *tp)
11373 u32 eee_cap, cpmuctrl = 0;
11375 if (!netif_running(tp->dev))
11376 return TG3_LOOPBACK_FAILED;
11378 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11379 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11381 err = tg3_reset_hw(tp, 1);
    if (err) {
        err = TG3_LOOPBACK_FAILED;
        goto done;
    }
11387 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
11390 /* Reroute all rx packets to the 1st queue */
11391 for (i = MAC_RSS_INDIR_TBL_0;
             i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
            tw32(i, 0x0);
11396 /* Turn off gphy autopowerdown. */
11397 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11398 tg3_phy_toggle_apd(tp, false);
11400 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
11404 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11406 /* Wait for up to 40 microseconds to acquire lock. */
11407 for (i = 0; i < 4; i++) {
11408 status = tr32(TG3_CPMU_MUTEX_GNT);
            if (status == CPMU_MUTEX_GNT_DRIVER)
                break;

            udelay(10);
        }
11414 if (status != CPMU_MUTEX_GNT_DRIVER) {
11415 err = TG3_LOOPBACK_FAILED;
11419 /* Turn off link-based power management. */
11420 cpmuctrl = tr32(TG3_CPMU_CTRL);
11421 tw32(TG3_CPMU_CTRL,
11422 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11423 CPMU_CTRL_LINK_AWARE_MODE));
11426 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11427 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11429 if ((tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) &&
11430 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11431 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11433 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
11434 tw32(TG3_CPMU_CTRL, cpmuctrl);
11436 /* Release the mutex */
11437 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11440 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11441 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
11442 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11443 err |= TG3_STD_LOOPBACK_FAILED <<
11444 TG3_PHY_LOOPBACK_SHIFT;
11445 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11446 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11447 err |= TG3_TSO_LOOPBACK_FAILED <<
11448 TG3_PHY_LOOPBACK_SHIFT;
11449 if ((tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) &&
11450 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11451 err |= TG3_JMB_LOOPBACK_FAILED <<
11452 TG3_PHY_LOOPBACK_SHIFT;
11455 /* Re-enable gphy autopowerdown. */
11456 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11457 tg3_phy_toggle_apd(tp, true);
11460 tp->phy_flags |= eee_cap;
11465 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11468 struct tg3 *tp = netdev_priv(dev);
    if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
        tg3_power_up(tp);
11473 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11475 if (tg3_test_nvram(tp) != 0) {
11476 etest->flags |= ETH_TEST_FL_FAILED;
11479 if (tg3_test_link(tp) != 0) {
11480 etest->flags |= ETH_TEST_FL_FAILED;
11483 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11484 int err, err2 = 0, irq_sync = 0;
11486 if (netif_running(dev)) {
11488 tg3_netif_stop(tp);
11492 tg3_full_lock(tp, irq_sync);
11494 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11495 err = tg3_nvram_lock(tp);
11496 tg3_halt_cpu(tp, RX_CPU_BASE);
11497 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11498 tg3_halt_cpu(tp, TX_CPU_BASE);
        if (!err)
            tg3_nvram_unlock(tp);
        if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
            tg3_phy_reset(tp);
11505 if (tg3_test_registers(tp) != 0) {
11506 etest->flags |= ETH_TEST_FL_FAILED;
11509 if (tg3_test_memory(tp) != 0) {
11510 etest->flags |= ETH_TEST_FL_FAILED;
11513 if ((data[4] = tg3_test_loopback(tp)) != 0)
11514 etest->flags |= ETH_TEST_FL_FAILED;
11516 tg3_full_unlock(tp);
11518 if (tg3_test_interrupt(tp) != 0) {
11519 etest->flags |= ETH_TEST_FL_FAILED;
11523 tg3_full_lock(tp, 0);
11525 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11526 if (netif_running(dev)) {
11527 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11528 err2 = tg3_restart_hw(tp, 1);
11530 tg3_netif_start(tp);
11533 tg3_full_unlock(tp);
    if (irq_sync && !err2)
        tg3_phy_start(tp);
11538 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11539 tg3_power_down(tp);
11543 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11545 struct mii_ioctl_data *data = if_mii(ifr);
11546 struct tg3 *tp = netdev_priv(dev);
11549 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
11550 struct phy_device *phydev;
11551 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11553 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11554 return phy_mii_ioctl(phydev, ifr, cmd);
11559 data->phy_id = tp->phy_addr;
11562 case SIOCGMIIREG: {
11565 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11566 break; /* We have no PHY */
11568 if (!netif_running(dev))
11571 spin_lock_bh(&tp->lock);
11572 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11573 spin_unlock_bh(&tp->lock);
11575 data->val_out = mii_regval;
11581 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11582 break; /* We have no PHY */
11584 if (!netif_running(dev))
11587 spin_lock_bh(&tp->lock);
11588 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11589 spin_unlock_bh(&tp->lock);
11597 return -EOPNOTSUPP;
11600 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11602 struct tg3 *tp = netdev_priv(dev);
11604 memcpy(ec, &tp->coal, sizeof(*ec));
11608 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11610 struct tg3 *tp = netdev_priv(dev);
11611 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11612 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11614 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
11615 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11616 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11617 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11618 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
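    /* On 5705 and newer parts these limits stay zero, so any nonzero
     * irq-coalescing or stats-block request is rejected below.
     */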
11621 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11622 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11623 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11624 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11625 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11626 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11627 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11628 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11629 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11630 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11633 /* No rx interrupts will be generated if both are zero */
11634 if ((ec->rx_coalesce_usecs == 0) &&
11635 (ec->rx_max_coalesced_frames == 0))
11638 /* No tx interrupts will be generated if both are zero */
11639 if ((ec->tx_coalesce_usecs == 0) &&
11640 (ec->tx_max_coalesced_frames == 0))
11643 /* Only copy relevant parameters, ignore all others. */
11644 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11645 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11646 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11647 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11648 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11649 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11650 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11651 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11652 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11654 if (netif_running(dev)) {
11655 tg3_full_lock(tp, 0);
11656 __tg3_set_coalesce(tp, &tp->coal);
11657 tg3_full_unlock(tp);
11662 static const struct ethtool_ops tg3_ethtool_ops = {
11663 .get_settings = tg3_get_settings,
11664 .set_settings = tg3_set_settings,
11665 .get_drvinfo = tg3_get_drvinfo,
11666 .get_regs_len = tg3_get_regs_len,
11667 .get_regs = tg3_get_regs,
11668 .get_wol = tg3_get_wol,
11669 .set_wol = tg3_set_wol,
11670 .get_msglevel = tg3_get_msglevel,
11671 .set_msglevel = tg3_set_msglevel,
11672 .nway_reset = tg3_nway_reset,
11673 .get_link = ethtool_op_get_link,
11674 .get_eeprom_len = tg3_get_eeprom_len,
11675 .get_eeprom = tg3_get_eeprom,
11676 .set_eeprom = tg3_set_eeprom,
11677 .get_ringparam = tg3_get_ringparam,
11678 .set_ringparam = tg3_set_ringparam,
11679 .get_pauseparam = tg3_get_pauseparam,
11680 .set_pauseparam = tg3_set_pauseparam,
11681 .self_test = tg3_self_test,
11682 .get_strings = tg3_get_strings,
11683 .set_phys_id = tg3_set_phys_id,
11684 .get_ethtool_stats = tg3_get_ethtool_stats,
11685 .get_coalesce = tg3_get_coalesce,
11686 .set_coalesce = tg3_set_coalesce,
11687 .get_sset_count = tg3_get_sset_count,
11690 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11692 u32 cursize, val, magic;
11694 tp->nvram_size = EEPROM_CHIP_SIZE;
    if (tg3_nvram_read(tp, 0, &magic) != 0)
        return;
11699 if ((magic != TG3_EEPROM_MAGIC) &&
11700 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11701 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
        return;

    /*
     * Size the chip by reading offsets at increasing powers of two.
     * When we encounter our validation signature, we know the addressing
     * has wrapped around, and thus have our chip size.
     */
    cursize = 0x10;
11711 while (cursize < tp->nvram_size) {
        if (tg3_nvram_read(tp, cursize, &val) != 0)
            return;

        if (val == magic)
            break;

        cursize <<= 1;
    }
11721 tp->nvram_size = cursize;
11724 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11728 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
11729 tg3_nvram_read(tp, 0, &val) != 0)
11732 /* Selfboot format */
11733 if (val != TG3_EEPROM_MAGIC) {
11734 tg3_get_eeprom_size(tp);
11738 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11740 /* This is confusing. We want to operate on the
11741 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11742 * call will read from NVRAM and byteswap the data
11743 * according to the byteswapping settings for all
11744 * other register accesses. This ensures the data we
11745 * want will always reside in the lower 16-bits.
11746 * However, the data in NVRAM is in LE format, which
11747 * means the data from the NVRAM read will always be
11748 * opposite the endianness of the CPU. The 16-bit
         * byteswap then brings the data to CPU endianness.
         */
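        /* The halfword is the device size in 1KB units; a stored
         * value of 512, for example, decodes to a 512KB part.
         */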
11751 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11755 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11758 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11762 nvcfg1 = tr32(NVRAM_CFG1);
11763 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11764 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11766 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11767 tw32(NVRAM_CFG1, nvcfg1);
11770 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11771 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11772 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11773 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11774 tp->nvram_jedecnum = JEDEC_ATMEL;
11775 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11776 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11778 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11779 tp->nvram_jedecnum = JEDEC_ATMEL;
11780 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11782 case FLASH_VENDOR_ATMEL_EEPROM:
11783 tp->nvram_jedecnum = JEDEC_ATMEL;
11784 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11785 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11787 case FLASH_VENDOR_ST:
11788 tp->nvram_jedecnum = JEDEC_ST;
11789 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11790 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11792 case FLASH_VENDOR_SAIFUN:
11793 tp->nvram_jedecnum = JEDEC_SAIFUN;
11794 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11796 case FLASH_VENDOR_SST_SMALL:
11797 case FLASH_VENDOR_SST_LARGE:
11798 tp->nvram_jedecnum = JEDEC_SST;
11799 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11803 tp->nvram_jedecnum = JEDEC_ATMEL;
11804 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11805 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11809 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11811 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11812 case FLASH_5752PAGE_SIZE_256:
11813 tp->nvram_pagesize = 256;
11815 case FLASH_5752PAGE_SIZE_512:
11816 tp->nvram_pagesize = 512;
11818 case FLASH_5752PAGE_SIZE_1K:
11819 tp->nvram_pagesize = 1024;
11821 case FLASH_5752PAGE_SIZE_2K:
11822 tp->nvram_pagesize = 2048;
11824 case FLASH_5752PAGE_SIZE_4K:
11825 tp->nvram_pagesize = 4096;
11827 case FLASH_5752PAGE_SIZE_264:
11828 tp->nvram_pagesize = 264;
11830 case FLASH_5752PAGE_SIZE_528:
11831 tp->nvram_pagesize = 528;
11836 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11840 nvcfg1 = tr32(NVRAM_CFG1);
11842 /* NVRAM protection for TPM */
11843 if (nvcfg1 & (1 << 27))
11844 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11846 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11847 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11848 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11849 tp->nvram_jedecnum = JEDEC_ATMEL;
11850 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11852 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11853 tp->nvram_jedecnum = JEDEC_ATMEL;
11854 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11855 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11857 case FLASH_5752VENDOR_ST_M45PE10:
11858 case FLASH_5752VENDOR_ST_M45PE20:
11859 case FLASH_5752VENDOR_ST_M45PE40:
11860 tp->nvram_jedecnum = JEDEC_ST;
11861 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11862 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11866 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
11867 tg3_nvram_get_pagesize(tp, nvcfg1);
11869 /* For eeprom, set pagesize to maximum eeprom size */
11870 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11872 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11873 tw32(NVRAM_CFG1, nvcfg1);
11877 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11879 u32 nvcfg1, protect = 0;
11881 nvcfg1 = tr32(NVRAM_CFG1);
11883 /* NVRAM protection for TPM */
11884 if (nvcfg1 & (1 << 27)) {
11885 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11889 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11891 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11892 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11893 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11894 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11895 tp->nvram_jedecnum = JEDEC_ATMEL;
11896 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11897 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11898 tp->nvram_pagesize = 264;
11899 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11900 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11901 tp->nvram_size = (protect ? 0x3e200 :
11902 TG3_NVRAM_SIZE_512KB);
11903 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11904 tp->nvram_size = (protect ? 0x1f200 :
11905 TG3_NVRAM_SIZE_256KB);
11907 tp->nvram_size = (protect ? 0x1f200 :
11908 TG3_NVRAM_SIZE_128KB);
11910 case FLASH_5752VENDOR_ST_M45PE10:
11911 case FLASH_5752VENDOR_ST_M45PE20:
11912 case FLASH_5752VENDOR_ST_M45PE40:
11913 tp->nvram_jedecnum = JEDEC_ST;
11914 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11915 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11916 tp->nvram_pagesize = 256;
11917 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11918 tp->nvram_size = (protect ?
11919 TG3_NVRAM_SIZE_64KB :
11920 TG3_NVRAM_SIZE_128KB);
11921 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11922 tp->nvram_size = (protect ?
11923 TG3_NVRAM_SIZE_64KB :
11924 TG3_NVRAM_SIZE_256KB);
11926 tp->nvram_size = (protect ?
11927 TG3_NVRAM_SIZE_128KB :
11928 TG3_NVRAM_SIZE_512KB);
11933 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11937 nvcfg1 = tr32(NVRAM_CFG1);
11939 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11940 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11941 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11942 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11943 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11944 tp->nvram_jedecnum = JEDEC_ATMEL;
11945 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11946 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11948 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11949 tw32(NVRAM_CFG1, nvcfg1);
11951 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11952 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11953 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11954 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11955 tp->nvram_jedecnum = JEDEC_ATMEL;
11956 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11957 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11958 tp->nvram_pagesize = 264;
11960 case FLASH_5752VENDOR_ST_M45PE10:
11961 case FLASH_5752VENDOR_ST_M45PE20:
11962 case FLASH_5752VENDOR_ST_M45PE40:
11963 tp->nvram_jedecnum = JEDEC_ST;
11964 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11965 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11966 tp->nvram_pagesize = 256;
11971 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11973 u32 nvcfg1, protect = 0;
11975 nvcfg1 = tr32(NVRAM_CFG1);
11977 /* NVRAM protection for TPM */
11978 if (nvcfg1 & (1 << 27)) {
11979 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11983 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11985 case FLASH_5761VENDOR_ATMEL_ADB021D:
11986 case FLASH_5761VENDOR_ATMEL_ADB041D:
11987 case FLASH_5761VENDOR_ATMEL_ADB081D:
11988 case FLASH_5761VENDOR_ATMEL_ADB161D:
11989 case FLASH_5761VENDOR_ATMEL_MDB021D:
11990 case FLASH_5761VENDOR_ATMEL_MDB041D:
11991 case FLASH_5761VENDOR_ATMEL_MDB081D:
11992 case FLASH_5761VENDOR_ATMEL_MDB161D:
11993 tp->nvram_jedecnum = JEDEC_ATMEL;
11994 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11995 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11996 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11997 tp->nvram_pagesize = 256;
11999 case FLASH_5761VENDOR_ST_A_M45PE20:
12000 case FLASH_5761VENDOR_ST_A_M45PE40:
12001 case FLASH_5761VENDOR_ST_A_M45PE80:
12002 case FLASH_5761VENDOR_ST_A_M45PE16:
12003 case FLASH_5761VENDOR_ST_M_M45PE20:
12004 case FLASH_5761VENDOR_ST_M_M45PE40:
12005 case FLASH_5761VENDOR_ST_M_M45PE80:
12006 case FLASH_5761VENDOR_ST_M_M45PE16:
12007 tp->nvram_jedecnum = JEDEC_ST;
12008 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12009 tp->tg3_flags2 |= TG3_FLG2_FLASH;
12010 tp->nvram_pagesize = 256;
12015 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12018 case FLASH_5761VENDOR_ATMEL_ADB161D:
12019 case FLASH_5761VENDOR_ATMEL_MDB161D:
12020 case FLASH_5761VENDOR_ST_A_M45PE16:
12021 case FLASH_5761VENDOR_ST_M_M45PE16:
12022 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12024 case FLASH_5761VENDOR_ATMEL_ADB081D:
12025 case FLASH_5761VENDOR_ATMEL_MDB081D:
12026 case FLASH_5761VENDOR_ST_A_M45PE80:
12027 case FLASH_5761VENDOR_ST_M_M45PE80:
12028 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12030 case FLASH_5761VENDOR_ATMEL_ADB041D:
12031 case FLASH_5761VENDOR_ATMEL_MDB041D:
12032 case FLASH_5761VENDOR_ST_A_M45PE40:
12033 case FLASH_5761VENDOR_ST_M_M45PE40:
12034 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12036 case FLASH_5761VENDOR_ATMEL_ADB021D:
12037 case FLASH_5761VENDOR_ATMEL_MDB021D:
12038 case FLASH_5761VENDOR_ST_A_M45PE20:
12039 case FLASH_5761VENDOR_ST_M_M45PE20:
12040 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12046 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12048 tp->nvram_jedecnum = JEDEC_ATMEL;
12049 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12050 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12053 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12057 nvcfg1 = tr32(NVRAM_CFG1);
12059 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12060 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12061 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12062 tp->nvram_jedecnum = JEDEC_ATMEL;
12063 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12064 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12066 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12067 tw32(NVRAM_CFG1, nvcfg1);
12069 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12070 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12071 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12072 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12073 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12074 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12075 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12076 tp->nvram_jedecnum = JEDEC_ATMEL;
12077 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12078 tp->tg3_flags2 |= TG3_FLG2_FLASH;
12080 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12081 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12082 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12083 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12084 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12086 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12087 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12088 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12090 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12091 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12092 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12096 case FLASH_5752VENDOR_ST_M45PE10:
12097 case FLASH_5752VENDOR_ST_M45PE20:
12098 case FLASH_5752VENDOR_ST_M45PE40:
12099 tp->nvram_jedecnum = JEDEC_ST;
12100 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12101 tp->tg3_flags2 |= TG3_FLG2_FLASH;
12103 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12104 case FLASH_5752VENDOR_ST_M45PE10:
12105 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12107 case FLASH_5752VENDOR_ST_M45PE20:
12108 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12110 case FLASH_5752VENDOR_ST_M45PE40:
12111 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12116 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
12120 tg3_nvram_get_pagesize(tp, nvcfg1);
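    /* 264- and 528-byte pages are the Atmel DataFlash layouts; these
     * are the only ones that use the NVRAM address translation scheme.
     */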
12121 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12122 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
12126 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12130 nvcfg1 = tr32(NVRAM_CFG1);
12132 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12133 case FLASH_5717VENDOR_ATMEL_EEPROM:
12134 case FLASH_5717VENDOR_MICRO_EEPROM:
12135 tp->nvram_jedecnum = JEDEC_ATMEL;
12136 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12137 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12139 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12140 tw32(NVRAM_CFG1, nvcfg1);
12142 case FLASH_5717VENDOR_ATMEL_MDB011D:
12143 case FLASH_5717VENDOR_ATMEL_ADB011B:
12144 case FLASH_5717VENDOR_ATMEL_ADB011D:
12145 case FLASH_5717VENDOR_ATMEL_MDB021D:
12146 case FLASH_5717VENDOR_ATMEL_ADB021B:
12147 case FLASH_5717VENDOR_ATMEL_ADB021D:
12148 case FLASH_5717VENDOR_ATMEL_45USPT:
12149 tp->nvram_jedecnum = JEDEC_ATMEL;
12150 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12151 tp->tg3_flags2 |= TG3_FLG2_FLASH;
12153 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12154 case FLASH_5717VENDOR_ATMEL_MDB021D:
12155 /* Detect size with tg3_nvram_get_size() */
12157 case FLASH_5717VENDOR_ATMEL_ADB021B:
12158 case FLASH_5717VENDOR_ATMEL_ADB021D:
12159 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12162 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12166 case FLASH_5717VENDOR_ST_M_M25PE10:
12167 case FLASH_5717VENDOR_ST_A_M25PE10:
12168 case FLASH_5717VENDOR_ST_M_M45PE10:
12169 case FLASH_5717VENDOR_ST_A_M45PE10:
12170 case FLASH_5717VENDOR_ST_M_M25PE20:
12171 case FLASH_5717VENDOR_ST_A_M25PE20:
12172 case FLASH_5717VENDOR_ST_M_M45PE20:
12173 case FLASH_5717VENDOR_ST_A_M45PE20:
12174 case FLASH_5717VENDOR_ST_25USPT:
12175 case FLASH_5717VENDOR_ST_45USPT:
12176 tp->nvram_jedecnum = JEDEC_ST;
12177 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12178 tp->tg3_flags2 |= TG3_FLG2_FLASH;
12180 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12181 case FLASH_5717VENDOR_ST_M_M25PE20:
12182 case FLASH_5717VENDOR_ST_M_M45PE20:
12183 /* Detect size with tg3_nvram_get_size() */
12185 case FLASH_5717VENDOR_ST_A_M25PE20:
12186 case FLASH_5717VENDOR_ST_A_M45PE20:
12187 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12190 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12195 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
12199 tg3_nvram_get_pagesize(tp, nvcfg1);
12200 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12201 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
12204 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12206 u32 nvcfg1, nvmpinstrp;
12208 nvcfg1 = tr32(NVRAM_CFG1);
12209 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12211 switch (nvmpinstrp) {
12212 case FLASH_5720_EEPROM_HD:
12213 case FLASH_5720_EEPROM_LD:
12214 tp->nvram_jedecnum = JEDEC_ATMEL;
12215 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12217 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12218 tw32(NVRAM_CFG1, nvcfg1);
12219 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12220 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12222 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12224 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12225 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12226 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12227 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12228 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12229 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12230 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12231 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12232 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12233 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12234 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12235 case FLASH_5720VENDOR_ATMEL_45USPT:
12236 tp->nvram_jedecnum = JEDEC_ATMEL;
12237 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12238 tp->tg3_flags2 |= TG3_FLG2_FLASH;
12240 switch (nvmpinstrp) {
12241 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12242 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12243 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12244 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12246 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12247 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12248 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12249 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12251 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12252 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12253 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12256 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12260 case FLASH_5720VENDOR_M_ST_M25PE10:
12261 case FLASH_5720VENDOR_M_ST_M45PE10:
12262 case FLASH_5720VENDOR_A_ST_M25PE10:
12263 case FLASH_5720VENDOR_A_ST_M45PE10:
12264 case FLASH_5720VENDOR_M_ST_M25PE20:
12265 case FLASH_5720VENDOR_M_ST_M45PE20:
12266 case FLASH_5720VENDOR_A_ST_M25PE20:
12267 case FLASH_5720VENDOR_A_ST_M45PE20:
12268 case FLASH_5720VENDOR_M_ST_M25PE40:
12269 case FLASH_5720VENDOR_M_ST_M45PE40:
12270 case FLASH_5720VENDOR_A_ST_M25PE40:
12271 case FLASH_5720VENDOR_A_ST_M45PE40:
12272 case FLASH_5720VENDOR_M_ST_M25PE80:
12273 case FLASH_5720VENDOR_M_ST_M45PE80:
12274 case FLASH_5720VENDOR_A_ST_M25PE80:
12275 case FLASH_5720VENDOR_A_ST_M45PE80:
12276 case FLASH_5720VENDOR_ST_25USPT:
12277 case FLASH_5720VENDOR_ST_45USPT:
12278 tp->nvram_jedecnum = JEDEC_ST;
12279 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12280 tp->tg3_flags2 |= TG3_FLG2_FLASH;
12282 switch (nvmpinstrp) {
12283 case FLASH_5720VENDOR_M_ST_M25PE20:
12284 case FLASH_5720VENDOR_M_ST_M45PE20:
12285 case FLASH_5720VENDOR_A_ST_M25PE20:
12286 case FLASH_5720VENDOR_A_ST_M45PE20:
12287 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12289 case FLASH_5720VENDOR_M_ST_M25PE40:
12290 case FLASH_5720VENDOR_M_ST_M45PE40:
12291 case FLASH_5720VENDOR_A_ST_M25PE40:
12292 case FLASH_5720VENDOR_A_ST_M45PE40:
12293 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12295 case FLASH_5720VENDOR_M_ST_M25PE80:
12296 case FLASH_5720VENDOR_M_ST_M45PE80:
12297 case FLASH_5720VENDOR_A_ST_M25PE80:
12298 case FLASH_5720VENDOR_A_ST_M45PE80:
12299 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12302 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12307 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
12311 tg3_nvram_get_pagesize(tp, nvcfg1);
12312 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12313 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
12316 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12317 static void __devinit tg3_nvram_init(struct tg3 *tp)
12319 tw32_f(GRC_EEPROM_ADDR,
12320 (EEPROM_ADDR_FSM_RESET |
12321 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12322 EEPROM_ADDR_CLKPERD_SHIFT)));
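    /* The write above resets the EEPROM state machine and programs
     * the default clock period before we go looking for NVRAM.
     */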
12326 /* Enable seeprom accesses. */
12327 tw32_f(GRC_LOCAL_CTRL,
12328 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12331 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12332 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12333 tp->tg3_flags |= TG3_FLAG_NVRAM;
12335 if (tg3_nvram_lock(tp)) {
12336 netdev_warn(tp->dev,
                "Cannot get nvram lock, %s failed\n",
                __func__);
            return;
        }
12341 tg3_enable_nvram_access(tp);
12343 tp->nvram_size = 0;
12345 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12346 tg3_get_5752_nvram_info(tp);
12347 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12348 tg3_get_5755_nvram_info(tp);
12349 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12350 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12351 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12352 tg3_get_5787_nvram_info(tp);
12353 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12354 tg3_get_5761_nvram_info(tp);
12355 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12356 tg3_get_5906_nvram_info(tp);
12357 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12358 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12359 tg3_get_57780_nvram_info(tp);
12360 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12361 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12362 tg3_get_5717_nvram_info(tp);
12363 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12364 tg3_get_5720_nvram_info(tp);
12366 tg3_get_nvram_info(tp);
12368 if (tp->nvram_size == 0)
12369 tg3_get_nvram_size(tp);
12371 tg3_disable_nvram_access(tp);
12372 tg3_nvram_unlock(tp);
12375 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
12377 tg3_get_eeprom_size(tp);
12381 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12382 u32 offset, u32 len, u8 *buf)
12387 for (i = 0; i < len; i += 4) {
12393 memcpy(&data, buf + i, 4);
        /*
         * The SEEPROM interface expects the data to always be opposite
         * the native endian format.  We accomplish this by reversing
         * all the operations that would have been performed on the
         * data from a call to tg3_nvram_read_be32().
         */
12401 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12403 val = tr32(GRC_EEPROM_ADDR);
12404 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
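        /* Writing the COMPLETE bit back clears any stale completion
         * status before the new write cycle is started below.
         */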
12406 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12408 tw32(GRC_EEPROM_ADDR, val |
12409 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12410 (addr & EEPROM_ADDR_ADDR_MASK) |
12411 EEPROM_ADDR_START |
12412 EEPROM_ADDR_WRITE);
12414 for (j = 0; j < 1000; j++) {
12415 val = tr32(GRC_EEPROM_ADDR);
12417 if (val & EEPROM_ADDR_COMPLETE)
12421 if (!(val & EEPROM_ADDR_COMPLETE)) {
12430 /* offset and length are dword aligned */
12431 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12435 u32 pagesize = tp->nvram_pagesize;
12436 u32 pagemask = pagesize - 1;
12440 tmp = kmalloc(pagesize, GFP_KERNEL);
12446 u32 phy_addr, page_off, size;
12448 phy_addr = offset & ~pagemask;
12450 for (j = 0; j < pagesize; j += 4) {
12451 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12452 (__be32 *) (tmp + j));
12459 page_off = offset & pagemask;
12466 memcpy(tmp + page_off, buf, size);
12468 offset = offset + (pagesize - page_off);
12470 tg3_enable_nvram_access(tp);
        /*
         * Before we can erase the flash page, we need
         * to issue a special "write enable" command.
         */
12476 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12478 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12481 /* Erase the target page */
12482 tw32(NVRAM_ADDR, phy_addr);
12484 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12485 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12487 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12490 /* Issue another write enable to start the write. */
12491 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12493 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12496 for (j = 0; j < pagesize; j += 4) {
12499 data = *((__be32 *) (tmp + j));
12501 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12503 tw32(NVRAM_ADDR, phy_addr + j);
12505 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12509 nvram_cmd |= NVRAM_CMD_FIRST;
12510 else if (j == (pagesize - 4))
12511 nvram_cmd |= NVRAM_CMD_LAST;
12513 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12520 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12521 tg3_nvram_exec_cmd(tp, nvram_cmd);
12528 /* offset and length are dword aligned */
12529 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12534 for (i = 0; i < len; i += 4, offset += 4) {
12535 u32 page_off, phy_addr, nvram_cmd;
12538 memcpy(&data, buf + i, 4);
12539 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12541 page_off = offset % tp->nvram_pagesize;
12543 phy_addr = tg3_nvram_phys_addr(tp, offset);
12545 tw32(NVRAM_ADDR, phy_addr);
12547 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
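        /* Bracket each flash page: the first word of a page (or of
         * the whole transfer) gets FIRST, the last word of a page
         * (or of the transfer) gets LAST.
         */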
12549 if (page_off == 0 || i == 0)
12550 nvram_cmd |= NVRAM_CMD_FIRST;
12551 if (page_off == (tp->nvram_pagesize - 4))
12552 nvram_cmd |= NVRAM_CMD_LAST;
12554 if (i == (len - 4))
12555 nvram_cmd |= NVRAM_CMD_LAST;
12557 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12558 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
12559 (tp->nvram_jedecnum == JEDEC_ST) &&
12560 (nvram_cmd & NVRAM_CMD_FIRST)) {
12562 if ((ret = tg3_nvram_exec_cmd(tp,
12563 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12568 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
12569 /* We always do complete word writes to eeprom. */
12570 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12573 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12579 /* offset and length are dword aligned */
12580 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12584 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
12585 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12586 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12590 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
12591 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12595 ret = tg3_nvram_lock(tp);
12599 tg3_enable_nvram_access(tp);
12600 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
12601 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
12602 tw32(NVRAM_WRITE1, 0x406);
12604 grc_mode = tr32(GRC_MODE);
12605 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12607 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
12608 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
12610 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12613 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12617 grc_mode = tr32(GRC_MODE);
12618 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12620 tg3_disable_nvram_access(tp);
12621 tg3_nvram_unlock(tp);
12624 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
12625 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12632 struct subsys_tbl_ent {
    u16 subsys_vendor, subsys_devid;
    u32 phy_id;
};
12637 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12638 /* Broadcom boards. */
12639 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12640 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12641 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12642 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12643 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12644 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12645 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12646 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12647 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12648 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12649 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12650 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12651 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12652 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12653 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12654 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12655 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12656 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12657 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12658 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12659 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12660 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12663 { TG3PCI_SUBVENDOR_ID_3COM,
12664 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12665 { TG3PCI_SUBVENDOR_ID_3COM,
12666 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12667 { TG3PCI_SUBVENDOR_ID_3COM,
12668 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12669 { TG3PCI_SUBVENDOR_ID_3COM,
12670 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12671 { TG3PCI_SUBVENDOR_ID_3COM,
12672 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12675 { TG3PCI_SUBVENDOR_ID_DELL,
12676 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12677 { TG3PCI_SUBVENDOR_ID_DELL,
12678 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12679 { TG3PCI_SUBVENDOR_ID_DELL,
12680 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12681 { TG3PCI_SUBVENDOR_ID_DELL,
12682 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12684 /* Compaq boards. */
12685 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12686 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12687 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12688 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12689 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12690 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12691 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12692 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12693 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12694 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12697 { TG3PCI_SUBVENDOR_ID_IBM,
12698 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12701 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12705 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12706 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12707 tp->pdev->subsystem_vendor) &&
12708 (subsys_id_to_phy_id[i].subsys_devid ==
12709 tp->pdev->subsystem_device))
            return &subsys_id_to_phy_id[i];
    }

    return NULL;
}
12715 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
    /* On some early chips the SRAM cannot be accessed in D3hot state,
     * so we need to make sure we're in D0.
     */
12723 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12724 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12725 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12728 /* Make sure register accesses (indirect or otherwise)
     * will function correctly.
     */
12731 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12732 tp->misc_host_ctrl);
12734 /* The memory arbiter has to be enabled in order for SRAM accesses
12735 * to succeed. Normally on powerup the tg3 chip firmware will make
12736 * sure it is enabled, but other entities such as system netboot
     * code might disable it.
     */
12739 val = tr32(MEMARB_MODE);
12740 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12742 tp->phy_id = TG3_PHY_ID_INVALID;
12743 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12745 /* Assume an onboard device and WOL capable by default. */
12746 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
12748 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12749 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12750 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12751 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12753 val = tr32(VCPU_CFGSHDW);
12754 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12755 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12756 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12757 (val & VCPU_CFGSHDW_WOL_MAGPKT))
12758 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12762 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12763 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12764 u32 nic_cfg, led_cfg;
12765 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12766 int eeprom_phy_serdes = 0;
12768 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12769 tp->nic_sram_data_cfg = nic_cfg;
12771 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12772 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12773 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12774 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12775 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12776 (ver > 0) && (ver < 0x100))
12777 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12779 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12780 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12782 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12783 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12784 eeprom_phy_serdes = 1;
12786 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12787 if (nic_phy_id != 0) {
12788 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12789 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
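            /* Reassemble the SRAM-format PHY ID into the same
             * layout that tg3_phy_probe() builds from the MII
             * PHYSID registers.
             */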
12791 eeprom_phy_id = (id1 >> 16) << 10;
12792 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12793 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12797 tp->phy_id = eeprom_phy_id;
12798 if (eeprom_phy_serdes) {
12799 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12800 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12802 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12805 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12806 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12807 SHASTA_EXT_LED_MODE_MASK);
12809 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12813 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12814 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12817 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12818 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12821 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12822 tp->led_ctrl = LED_CTRL_MODE_MAC;
12824 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
             * read on some older 5700/5701 bootcode.
             */
12827 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12829 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12831 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12835 case SHASTA_EXT_LED_SHARED:
12836 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12837 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12838 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12839 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12840 LED_CTRL_MODE_PHY_2);
12843 case SHASTA_EXT_LED_MAC:
12844 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12847 case SHASTA_EXT_LED_COMBO:
12848 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12849 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12850 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12851 LED_CTRL_MODE_PHY_2);
12856 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12857 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12858 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12859 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12861 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12862 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12864 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12865 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
12866 if ((tp->pdev->subsystem_vendor ==
12867 PCI_VENDOR_ID_ARIMA) &&
12868 (tp->pdev->subsystem_device == 0x205a ||
12869 tp->pdev->subsystem_device == 0x2063))
12870 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12872 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12873 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12876 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12877 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
12878 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12879 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
12882 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12883 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12884 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
12886 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12887 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12888 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
12890 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
12891 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
12892 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12894 if (cfg2 & (1 << 17))
12895 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
        /* SerDes signal pre-emphasis in register 0x590 is set by
         * the bootcode if bit 18 is set.
         */
12899 if (cfg2 & (1 << 18))
12900 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12902 if (((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) ||
12903 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12904 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))) &&
12905 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12906 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12908 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12909 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12910 !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {
12913 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12914 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12915 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12918 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12919 tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE;
12920 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12921 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
12922 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12923 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
12926 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
12927 device_set_wakeup_enable(&tp->pdev->dev,
12928 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
12930 device_set_wakeup_capable(&tp->pdev->dev, false);
12933 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12938 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12939 tw32(OTP_CTRL, cmd);
12941 /* Wait for up to 1 ms for command to execute. */
12942 for (i = 0; i < 100; i++) {
12943 val = tr32(OTP_STATUS);
        if (val & OTP_STATUS_CMD_DONE)
            break;

        udelay(10);
    }
12949 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12952 /* Read the gphy configuration from the OTP region of the chip. The gphy
12953 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
12956 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12958 u32 bhalf_otp, thalf_otp;
12960 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12962 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12965 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12967 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12970 thalf_otp = tr32(OTP_READ_DATA);
12972 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12974 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12977 bhalf_otp = tr32(OTP_READ_DATA);
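    /* The 32-bit gphy config straddles the two words just read: its
     * upper half sits in the low 16 bits of the first word and its
     * lower half in the high 16 bits of the second.
     */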
12979 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12982 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12984 u32 adv = ADVERTISED_Autoneg |
12987 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12988 adv |= ADVERTISED_1000baseT_Half |
12989 ADVERTISED_1000baseT_Full;
12991 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12992 adv |= ADVERTISED_100baseT_Half |
12993 ADVERTISED_100baseT_Full |
12994 ADVERTISED_10baseT_Half |
12995 ADVERTISED_10baseT_Full |
12998 adv |= ADVERTISED_FIBRE;
13000 tp->link_config.advertising = adv;
13001 tp->link_config.speed = SPEED_INVALID;
13002 tp->link_config.duplex = DUPLEX_INVALID;
13003 tp->link_config.autoneg = AUTONEG_ENABLE;
13004 tp->link_config.active_speed = SPEED_INVALID;
13005 tp->link_config.active_duplex = DUPLEX_INVALID;
13006 tp->link_config.orig_speed = SPEED_INVALID;
13007 tp->link_config.orig_duplex = DUPLEX_INVALID;
13008 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13011 static int __devinit tg3_phy_probe(struct tg3 *tp)
13013 u32 hw_phy_id_1, hw_phy_id_2;
13014 u32 hw_phy_id, hw_phy_id_masked;
13017 /* flow control autonegotiation is default behavior */
13018 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
13019 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13021 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
13022 return tg3_phy_init(tp);
13024 /* Reading the PHY ID register can conflict with ASF
     * firmware access to the PHY hardware.
     */
13028 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
13029 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
13030 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
        /* Now read the physical PHY_ID from the chip and verify
         * that it is sane.  If it doesn't look good, we fall back
         * to the PHY ID recorded in the eeprom area or, failing
         * that, to the hard-coded subsystem-ID table.
         */
13037 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13038 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
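        /* Pack the two MII PHYSID words into the driver's internal
         * TG3_PHY_ID_* format; the model and revision number end up
         * in the low ten bits.
         */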
13040 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13041 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13042 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13044 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13047 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13048 tp->phy_id = hw_phy_id;
13049 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13050 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13052 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13054 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13055 /* Do nothing, phy ID already set up in
         * tg3_get_eeprom_hw_cfg().
         */
    } else {
13059 struct subsys_tbl_ent *p;
13061 /* No eeprom signature? Try the hardcoded
         * subsys device table.
         */
13064 p = tg3_lookup_by_subsys(tp);
13068 tp->phy_id = p->phy_id;
            if (!tp->phy_id ||
                tp->phy_id == TG3_PHY_ID_BCM8002)
13071 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13075 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13076 ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13077 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13078 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13079 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13080 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13082 tg3_phy_init_link_config(tp);
13084 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13085 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
13086 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
13087 u32 bmsr, adv_reg, tg3_ctrl, mask;
13089 tg3_readphy(tp, MII_BMSR, &bmsr);
13090 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13091 (bmsr & BMSR_LSTATUS))
13092 goto skip_phy_reset;
13094 err = tg3_phy_reset(tp);
13098 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
13099 ADVERTISE_100HALF | ADVERTISE_100FULL |
13100 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
13102 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
13103 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
13104 MII_TG3_CTRL_ADV_1000_FULL);
13105 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13106 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
13107 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
13108 MII_TG3_CTRL_ENABLE_AS_MASTER);
13111 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13112 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13113 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13114 if (!tg3_copper_is_advertising_all(tp, mask)) {
13115 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
13117 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13118 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
13120 tg3_writephy(tp, MII_BMCR,
13121 BMCR_ANENABLE | BMCR_ANRESTART);
13123 tg3_phy_set_wirespeed(tp);
13125 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
13126 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13127 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
13131 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13132 err = tg3_init_5401phy_dsp(tp);
13136 err = tg3_init_5401phy_dsp(tp);
13142 static void __devinit tg3_read_vpd(struct tg3 *tp)
13145 unsigned int block_end, rosize, len;
13148 vpd_data = (u8 *)tg3_vpd_readblock(tp);
13152 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13153 PCI_VPD_LRDT_RO_DATA);
13155 goto out_not_found;
13157 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13158 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13159 i += PCI_VPD_LRDT_TAG_SIZE;
13161 if (block_end > TG3_NVM_VPD_LEN)
13162 goto out_not_found;
13164 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13165 PCI_VPD_RO_KEYWORD_MFR_ID);
13167 len = pci_vpd_info_field_size(&vpd_data[j]);
13169 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13170 if (j + len > block_end || len != 4 ||
13171 memcmp(&vpd_data[j], "1028", 4))
13174 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13175 PCI_VPD_RO_KEYWORD_VENDOR0);
13179 len = pci_vpd_info_field_size(&vpd_data[j]);
13181 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13182 if (j + len > block_end)
13185 memcpy(tp->fw_ver, &vpd_data[j], len);
13186 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13190 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13191 PCI_VPD_RO_KEYWORD_PARTNO);
13193 goto out_not_found;
13195 len = pci_vpd_info_field_size(&vpd_data[i]);
13197 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13198 if (len > TG3_BPN_SIZE ||
13199 (len + i) > TG3_NVM_VPD_LEN)
13200 goto out_not_found;
13202 memcpy(tp->board_part_number, &vpd_data[i], len);
13206 if (tp->board_part_number[0])
13210 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13211 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13212 strcpy(tp->board_part_number, "BCM5717");
13213 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13214 strcpy(tp->board_part_number, "BCM5718");
13217 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13218 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13219 strcpy(tp->board_part_number, "BCM57780");
13220 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13221 strcpy(tp->board_part_number, "BCM57760");
13222 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13223 strcpy(tp->board_part_number, "BCM57790");
13224 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13225 strcpy(tp->board_part_number, "BCM57788");
13228 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13229 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13230 strcpy(tp->board_part_number, "BCM57761");
13231 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13232 strcpy(tp->board_part_number, "BCM57765");
13233 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13234 strcpy(tp->board_part_number, "BCM57781");
13235 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13236 strcpy(tp->board_part_number, "BCM57785");
13237 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13238 strcpy(tp->board_part_number, "BCM57791");
13239 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13240 strcpy(tp->board_part_number, "BCM57795");
13243 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13244 strcpy(tp->board_part_number, "BCM95906");
13247 strcpy(tp->board_part_number, "none");
13251 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13255 if (tg3_nvram_read(tp, offset, &val) ||
13256 (val & 0xfc000000) != 0x0c000000 ||
13257 tg3_nvram_read(tp, offset + 4, &val) ||
13264 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13266 u32 val, offset, start, ver_offset;
13268 bool newver = false;
13270 if (tg3_nvram_read(tp, 0xc, &offset) ||
13271 tg3_nvram_read(tp, 0x4, &start))
13274 offset = tg3_nvram_logical_addr(tp, offset);
13276 if (tg3_nvram_read(tp, offset, &val))
13279 if ((val & 0xfc000000) == 0x0c000000) {
13280 if (tg3_nvram_read(tp, offset + 4, &val))
13287 dst_off = strlen(tp->fw_ver);
13290 if (TG3_VER_SIZE - dst_off < 16 ||
13291 tg3_nvram_read(tp, offset + 8, &ver_offset))
13294 offset = offset + ver_offset - start;
13295 for (i = 0; i < 16; i += 4) {
13297 if (tg3_nvram_read_be32(tp, offset + i, &v))
13300 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13305 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13308 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13309 TG3_NVM_BCVER_MAJSFT;
13310 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13311 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13312 "v%d.%02d", major, minor);
13316 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13318 u32 val, major, minor;
13320 /* Use native endian representation */
13321 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13324 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13325 TG3_NVM_HWSB_CFG1_MAJSFT;
13326 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13327 TG3_NVM_HWSB_CFG1_MINSFT;
13329 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13332 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13334 u32 offset, major, minor, build;
13336 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13338 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13341 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13342 case TG3_EEPROM_SB_REVISION_0:
13343 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13345 case TG3_EEPROM_SB_REVISION_2:
13346 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13348 case TG3_EEPROM_SB_REVISION_3:
13349 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13351 case TG3_EEPROM_SB_REVISION_4:
13352 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13354 case TG3_EEPROM_SB_REVISION_5:
13355 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13357 case TG3_EEPROM_SB_REVISION_6:
13358 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13364 if (tg3_nvram_read(tp, offset, &val))
13367 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13368 TG3_EEPROM_SB_EDH_BLD_SHFT;
13369 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13370 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13371 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13373 if (minor > 99 || build > 26)
13376 offset = strlen(tp->fw_ver);
13377 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13378 " v%d.%02d", major, minor);
13381 offset = strlen(tp->fw_ver);
13382 if (offset < TG3_VER_SIZE - 1)
13383 tp->fw_ver[offset] = 'a' + build - 1;
13387 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13389 u32 val, offset, start;
13392 for (offset = TG3_NVM_DIR_START;
13393 offset < TG3_NVM_DIR_END;
13394 offset += TG3_NVM_DIRENT_SIZE) {
13395 if (tg3_nvram_read(tp, offset, &val))
13398 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13402 if (offset == TG3_NVM_DIR_END)
13405 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
13406 start = 0x08000000;
13407 else if (tg3_nvram_read(tp, offset - 4, &start))
13410 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13411 !tg3_fw_img_is_valid(tp, offset) ||
13412 tg3_nvram_read(tp, offset + 8, &val))
13415 offset += val - start;
13417 vlen = strlen(tp->fw_ver);
13419 tp->fw_ver[vlen++] = ',';
13420 tp->fw_ver[vlen++] = ' ';
13422 for (i = 0; i < 4; i++) {
13424 if (tg3_nvram_read_be32(tp, offset, &v))
13427 offset += sizeof(v);
13429 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13430 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13434 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13439 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13445 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
13446 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
13449 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13450 if (apedata != APE_SEG_SIG_MAGIC)
13453 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13454 if (!(apedata & APE_FW_STATUS_READY))
13457 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13459 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13460 tp->tg3_flags3 |= TG3_FLG3_APE_HAS_NCSI;
13466 vlen = strlen(tp->fw_ver);
13468 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13470 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13471 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13472 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13473 (apedata & APE_FW_VERSION_BLDMSK));
13476 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13479 bool vpd_vers = false;
13481 if (tp->fw_ver[0] != 0)
13484 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
13485 strcat(tp->fw_ver, "sb");
13489 if (tg3_nvram_read(tp, 0, &val))
13492 if (val == TG3_EEPROM_MAGIC)
13493 tg3_read_bc_ver(tp);
13494 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13495 tg3_read_sb_ver(tp, val);
13496 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13497 tg3_read_hwsb_ver(tp);
13501 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
13502 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
13505 tg3_read_mgmtfw_ver(tp);
13508 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
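/* Illustrative sketch (not part of the driver): appending one more tag to
 * a fixed-size version string while keeping it NUL-terminated, in the same
 * spirit as the snprintf()/strncat() calls and the forced termination above.
 */
static void __maybe_unused example_ver_append(char *buf, size_t size,
					      const char *tag)
{
	size_t used = strlen(buf);

	if (used < size - 1)
		snprintf(buf + used, size - used, " %s", tag);
	buf[size - 1] = 0;	/* snprintf already terminates; kept for symmetry */
}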
13511 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13513 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13515 if (tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP)
13516 return TG3_RX_RET_MAX_SIZE_5717;
13517 else if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
13518 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13519 return TG3_RX_RET_MAX_SIZE_5700;
13521 return TG3_RX_RET_MAX_SIZE_5705;
13524 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13525 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13526 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13527 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13531 static int __devinit tg3_get_invariants(struct tg3 *tp)
13534 u32 pci_state_reg, grc_misc_cfg;
13539 /* Force memory write invalidate off. If we leave it on,
13540 * then on 5700_BX chips we have to enable a workaround.
13541 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13542 * to match the cacheline size. The Broadcom driver has this
13543 * workaround but turns MWI off all the time, so it never uses
13544 * it. This suggests that the workaround is insufficient.
13546 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13547 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13548 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13550 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13551 * has the register indirect write enable bit set before
13552 * we try to access any of the MMIO registers. It is also
13553 * critical that the PCI-X hw workaround situation is decided
13554 * before that.
13556 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13559 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13560 MISC_HOST_CTRL_CHIPREV_SHIFT);
13561 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13562 u32 prod_id_asic_rev;
13564 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13565 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13566 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13567 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13568 pci_read_config_dword(tp->pdev,
13569 TG3PCI_GEN2_PRODID_ASICREV,
13570 &prod_id_asic_rev);
13571 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13572 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13573 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13574 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13575 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13576 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13577 pci_read_config_dword(tp->pdev,
13578 TG3PCI_GEN15_PRODID_ASICREV,
13579 &prod_id_asic_rev);
13581 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13582 &prod_id_asic_rev);
13584 tp->pci_chip_rev_id = prod_id_asic_rev;
13587 /* Wrong chip ID in 5752 A0. This code can be removed later
13588 * as A0 is not in production.
13590 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13591 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13593 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13594 * we need to disable memory and use config. cycles
13595 * only to access all registers. The 5702/03 chips
13596 * can mistakenly decode the special cycles from the
13597 * ICH chipsets as memory write cycles, causing corruption
13598 * of register and memory space. Only certain ICH bridges
13599 * will drive special cycles with non-zero data during the
13600 * address phase which can fall within the 5703's address
13601 * range. This is not an ICH bug as the PCI spec allows
13602 * non-zero address during special cycles. However, only
13603 * these ICH bridges are known to drive non-zero addresses
13604 * during special cycles.
13606 * Since special cycles do not cross PCI bridges, we only
13607 * enable this workaround if the 5703 is on the secondary
13608 * bus of these ICH bridges.
13610 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13611 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13612 static struct tg3_dev_id {
13616 } ich_chipsets[] = {
13617 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13619 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13621 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13623 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13627 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13628 struct pci_dev *bridge = NULL;
13630 while (pci_id->vendor != 0) {
13631 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13637 if (pci_id->rev != PCI_ANY_ID) {
13638 if (bridge->revision > pci_id->rev)
13641 if (bridge->subordinate &&
13642 (bridge->subordinate->number ==
13643 tp->pdev->bus->number)) {
13645 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
13646 pci_dev_put(bridge);
13652 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
13653 static struct tg3_dev_id {
13656 } bridge_chipsets[] = {
13657 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13658 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13661 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13662 struct pci_dev *bridge = NULL;
13664 while (pci_id->vendor != 0) {
13665 bridge = pci_get_device(pci_id->vendor,
13672 if (bridge->subordinate &&
13673 (bridge->subordinate->number <=
13674 tp->pdev->bus->number) &&
13675 (bridge->subordinate->subordinate >=
13676 tp->pdev->bus->number)) {
13677 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
13678 pci_dev_put(bridge);
13684 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13685 * DMA addresses > 40 bits. This bridge may have additional
13686 * 57xx devices behind it in some 4-port NIC designs, for example.
13687 * Any tg3 device found behind the bridge will also need the 40-bit
13690 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13691 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13692 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
13693 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13694 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13696 struct pci_dev *bridge = NULL;
13699 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13700 PCI_DEVICE_ID_SERVERWORKS_EPB,
13702 if (bridge && bridge->subordinate &&
13703 (bridge->subordinate->number <=
13704 tp->pdev->bus->number) &&
13705 (bridge->subordinate->subordinate >=
13706 tp->pdev->bus->number)) {
13707 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13708 pci_dev_put(bridge);
13714 /* Initialize misc host control in PCI block. */
13715 tp->misc_host_ctrl |= (misc_ctrl_reg &
13716 MISC_HOST_CTRL_CHIPREV);
13717 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13718 tp->misc_host_ctrl);
13720 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13721 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13722 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13723 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13724 tp->pdev_peer = tg3_find_peer(tp);
13726 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13727 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13728 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13729 tp->tg3_flags3 |= TG3_FLG3_5717_PLUS;
13731 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13732 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
13733 tp->tg3_flags3 |= TG3_FLG3_57765_PLUS;
13735 /* Intentionally exclude ASIC_REV_5906 */
13736 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13737 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13738 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13739 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13740 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13741 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13742 (tp->tg3_flags3 & TG3_FLG3_57765_PLUS))
13743 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
13745 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13746 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13747 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13748 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13749 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13750 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
13753 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13754 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
13755 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
13757 /* 5700 B0 chips do not support checksumming correctly due
13758 * to hardware bugs.
13760 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
13761 u32 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
13763 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
13764 features |= NETIF_F_IPV6_CSUM;
13765 tp->dev->features |= features;
13766 tp->dev->hw_features |= features;
13767 tp->dev->vlan_features |= features;
13770 /* Determine TSO capabilities */
13771 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13772 ; /* Do nothing. HW bug. */
13773 else if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS)
13774 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
13775 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13776 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13777 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
13778 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13779 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
13780 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13781 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13782 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
13783 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13784 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13785 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13786 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
13787 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13788 tp->fw_needed = FIRMWARE_TG3TSO5;
13790 tp->fw_needed = FIRMWARE_TG3TSO;
13795 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13796 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
13797 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13798 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13799 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13800 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13801 tp->pdev_peer == tp->pdev))
13802 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
13804 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13805 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13806 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
13809 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
13810 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
13811 tp->irq_max = TG3_IRQ_MAX_VECS;
13815 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13816 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13817 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13818 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
13819 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
13820 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
13821 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13824 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13825 tp->tg3_flags3 |= TG3_FLG3_LRG_PROD_RING_CAP;
13827 if ((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
13828 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13829 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13831 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13832 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
13833 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
13834 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
13836 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13839 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13840 if (tp->pcie_cap != 0) {
13843 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13845 tp->pcie_readrq = 4096;
13846 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13847 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13848 tp->pcie_readrq = 2048;
13850 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13852 pci_read_config_word(tp->pdev,
13853 tp->pcie_cap + PCI_EXP_LNKCTL,
13855 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13856 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13857 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
13858 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13859 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13860 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13861 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13862 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
13863 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13864 tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
13866 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13867 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13868 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13869 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13870 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13871 if (!tp->pcix_cap) {
13872 dev_err(&tp->pdev->dev,
13873 "Cannot find PCI-X capability, aborting\n");
13877 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13878 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
13881 /* If we have an AMD 762 or VIA K8T800 chipset, the host
13882 * controller may reorder writes to the mailbox registers,
13883 * which can cause major trouble. We read back from
13884 * every mailbox register write to force the writes to be
13885 * posted to the chip in order.
13887 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13888 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13889 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
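#if 0	/* Illustrative sketch, not driver code: a "flushing" register write
	 * reads the register back so the posted write reaches the chip
	 * before any later write can pass it; tg3_write_flush_reg32() uses
	 * this pattern when TG3_FLAG_MBOX_WRITE_REORDER is set.
	 */
	writel(val, tp->regs + off);
	readl(tp->regs + off);		/* read-back forces ordering */
#endif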
13891 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13892 &tp->pci_cacheline_sz);
13893 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13894 &tp->pci_lat_timer);
13895 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13896 tp->pci_lat_timer < 64) {
13897 tp->pci_lat_timer = 64;
13898 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13899 tp->pci_lat_timer);
13902 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13903 /* 5700 BX chips need to have their TX producer index
13904 * mailboxes written twice to work around a bug.
13906 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
13908 /* If we are in PCI-X mode, enable register write workaround.
13910 * The workaround is to use indirect register accesses
13911 * for all chip writes except those to mailbox registers.
13913 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13916 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13918 /* The chip can have its power management PCI config
13919 * space registers clobbered due to this bug.
13920 * So explicitly force the chip into D0 here.
13922 pci_read_config_dword(tp->pdev,
13923 tp->pm_cap + PCI_PM_CTRL,
13925 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13926 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13927 pci_write_config_dword(tp->pdev,
13928 tp->pm_cap + PCI_PM_CTRL,
13931 /* Also, force SERR#/PERR# in PCI command. */
13932 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13933 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13934 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13938 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13939 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
13940 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13941 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
13943 /* Chip-specific fixup from Broadcom driver */
13944 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13945 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13946 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13947 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13950 /* Default fast path register access methods */
13951 tp->read32 = tg3_read32;
13952 tp->write32 = tg3_write32;
13953 tp->read32_mbox = tg3_read32;
13954 tp->write32_mbox = tg3_write32;
13955 tp->write32_tx_mbox = tg3_write32;
13956 tp->write32_rx_mbox = tg3_write32;
13958 /* Various workaround register access methods */
13959 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
13960 tp->write32 = tg3_write_indirect_reg32;
13961 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13962 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
13963 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13965 * Back to back register writes can cause problems on these
13966 * chips, the workaround is to read back all reg writes
13967 * except those to mailbox regs.
13969 * See tg3_write_indirect_reg32().
13971 tp->write32 = tg3_write_flush_reg32;
13974 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
13975 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
13976 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13977 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
13978 tp->write32_rx_mbox = tg3_write_flush_reg32;
13981 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
13982 tp->read32 = tg3_read_indirect_reg32;
13983 tp->write32 = tg3_write_indirect_reg32;
13984 tp->read32_mbox = tg3_read_indirect_mbox;
13985 tp->write32_mbox = tg3_write_indirect_mbox;
13986 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13987 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13992 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13993 pci_cmd &= ~PCI_COMMAND_MEMORY;
13994 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13996 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13997 tp->read32_mbox = tg3_read32_mbox_5906;
13998 tp->write32_mbox = tg3_write32_mbox_5906;
13999 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14000 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14003 if (tp->write32 == tg3_write_indirect_reg32 ||
14004 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
14005 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14006 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14007 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
14009 /* Get eeprom hw config before calling tg3_set_power_state().
14010 * In particular, the TG3_FLG2_IS_NIC flag must be
14011 * determined before calling tg3_set_power_state() so that
14012 * we know whether or not to switch out of Vaux power.
14013 * When the flag is set, it means that GPIO1 is used for eeprom
14014 * write protect and also implies that it is a LOM where GPIOs
14015 * are not used to switch power.
14017 tg3_get_eeprom_hw_cfg(tp);
14019 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
14020 /* Allow reads and writes to the
14021 * APE register and memory space.
14023 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14024 PCISTATE_ALLOW_APE_SHMEM_WR |
14025 PCISTATE_ALLOW_APE_PSPACE_WR;
14026 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14030 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14031 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14032 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14033 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14034 (tp->tg3_flags3 & TG3_FLG3_57765_PLUS))
14035 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
14037 /* Set up tp->grc_local_ctrl before calling tg3_power_up().
14038 * GPIO1 driven high will bring 5700's external PHY out of reset.
14039 * It is also used as eeprom write protect on LOMs.
14041 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14042 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
14043 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
14044 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14045 GRC_LCLCTRL_GPIO_OUTPUT1);
14046 /* Unused GPIO3 must be driven as output on 5752 because there
14047 * are no pull-up resistors on unused GPIO pins.
14049 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14050 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14052 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14053 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14054 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14055 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14057 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14058 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14059 /* Turn off the debug UART. */
14060 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14061 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
14062 /* Keep VMain power. */
14063 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14064 GRC_LCLCTRL_GPIO_OUTPUT0;
14067 /* Force the chip into D0. */
14068 err = tg3_power_up(tp);
14070 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
14074 /* Derive initial jumbo mode from MTU assigned in
14075 * ether_setup() via the alloc_etherdev() call
14077 if (tp->dev->mtu > ETH_DATA_LEN &&
14078 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
14079 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
14081 /* Determine WakeOnLan speed to use. */
14082 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14083 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14084 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14085 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14086 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
14088 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
14091 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14092 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14094 /* A few boards don't want the Ethernet@WireSpeed PHY feature */
14095 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
14096 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
14097 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14098 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14099 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14100 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14101 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14103 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14104 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14105 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14106 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14107 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14109 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
14110 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14111 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14112 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14113 !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {
14114 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14115 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14116 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14117 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14118 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14119 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14120 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14121 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14122 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14124 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14127 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14128 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14129 tp->phy_otp = tg3_read_otp_phycfg(tp);
14130 if (tp->phy_otp == 0)
14131 tp->phy_otp = TG3_OTP_DEFAULT;
14134 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
14135 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14137 tp->mi_mode = MAC_MI_MODE_BASE;
14139 tp->coalesce_mode = 0;
14140 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14141 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14142 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14144 /* Set these bits to enable statistics workaround. */
14145 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14146 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14147 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14148 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14149 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14152 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14153 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14154 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
14156 err = tg3_mdio_init(tp);
14160 /* Initialize data/descriptor byte/word swapping. */
14161 val = tr32(GRC_MODE);
14162 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14163 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14164 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14165 GRC_MODE_B2HRX_ENABLE |
14166 GRC_MODE_HTX2B_ENABLE |
14167 GRC_MODE_HOST_STACKUP);
14169 val &= GRC_MODE_HOST_STACKUP;
14171 tw32(GRC_MODE, val | tp->grc_mode);
14173 tg3_switch_clocks(tp);
14175 /* Clear this out for sanity. */
14176 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14178 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14180 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14181 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
14182 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14184 if (chiprevid == CHIPREV_ID_5701_A0 ||
14185 chiprevid == CHIPREV_ID_5701_B0 ||
14186 chiprevid == CHIPREV_ID_5701_B2 ||
14187 chiprevid == CHIPREV_ID_5701_B5) {
14188 void __iomem *sram_base;
14190 /* Write some dummy words into the SRAM status block
14191 * area, see if it reads back correctly. If the return
14192 * value is bad, force enable the PCIX workaround.
14194 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14196 writel(0x00000000, sram_base);
14197 writel(0x00000000, sram_base + 4);
14198 writel(0xffffffff, sram_base + 4);
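/* If the write to sram_base + 4 bled into the word at sram_base,
 * the read below returns nonzero and we force the workaround on.
 */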
14199 if (readl(sram_base) != 0x00000000)
14200 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
14205 tg3_nvram_init(tp);
14207 grc_misc_cfg = tr32(GRC_MISC_CFG);
14208 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14210 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14211 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14212 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14213 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
14215 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
14216 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
14217 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
14218 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
14219 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14220 HOSTCC_MODE_CLRTICK_TXBD);
14222 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14223 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14224 tp->misc_host_ctrl);
14227 /* Preserve the APE MAC_MODE bits */
14228 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
14229 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14231 tp->mac_mode = TG3_DEF_MAC_MODE;
14233 /* these are limited to 10/100 only */
14234 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14235 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14236 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14237 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14238 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14239 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14240 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14241 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14242 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14243 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14244 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14245 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14246 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14247 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14248 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14249 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14251 err = tg3_phy_probe(tp);
14253 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14254 /* ... but do not return immediately ... */
14259 tg3_read_fw_ver(tp);
14261 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14262 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14264 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14265 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14267 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14270 /* 5700 {AX,BX} chips have a broken status block link
14271 * change bit implementation, so we must use the
14272 * status register in those cases.
14274 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14275 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
14277 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
14279 /* The led_ctrl is set during tg3_phy_probe, here we might
14280 * have to force the link status polling mechanism based
14281 * upon subsystem IDs.
14283 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14284 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14285 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14286 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14287 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
14290 /* For all SERDES we poll the MAC status register. */
14291 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14292 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
14294 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
14296 tp->rx_offset = NET_IP_ALIGN;
14297 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14298 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14299 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
14301 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14302 tp->rx_copy_thresh = ~(u16)0;
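/* A copy threshold of ~(u16)0 means every rx packet is copied into a
 * freshly allocated (and therefore aligned) buffer, which sidesteps
 * unaligned accesses on architectures that handle them poorly.
 */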
14306 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14307 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14308 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
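/* All three ring sizes are powers of two, so "size - 1" doubles as an
 * index mask for cheap wrap-around arithmetic.
 */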
14310 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14312 /* Increment the rx prod index on the rx std ring by at most
14313 * 8 for these chips to work around hw errata.
14315 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14316 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14317 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14318 tp->rx_std_max_post = 8;
14320 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
14321 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14322 PCIE_PWR_MGMT_L1_THRESH_MSK;
14327 #ifdef CONFIG_SPARC
14328 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14330 struct net_device *dev = tp->dev;
14331 struct pci_dev *pdev = tp->pdev;
14332 struct device_node *dp = pci_device_to_OF_node(pdev);
14333 const unsigned char *addr;
14336 addr = of_get_property(dp, "local-mac-address", &len);
14337 if (addr && len == 6) {
14338 memcpy(dev->dev_addr, addr, 6);
14339 memcpy(dev->perm_addr, dev->dev_addr, 6);
14345 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14347 struct net_device *dev = tp->dev;
14349 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14350 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14355 static int __devinit tg3_get_device_address(struct tg3 *tp)
14357 struct net_device *dev = tp->dev;
14358 u32 hi, lo, mac_offset;
14361 #ifdef CONFIG_SPARC
14362 if (!tg3_get_macaddr_sparc(tp))
14367 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
14368 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
14369 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14371 if (tg3_nvram_lock(tp))
14372 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14374 tg3_nvram_unlock(tp);
14375 } else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
14376 if (PCI_FUNC(tp->pdev->devfn) & 1)
14378 if (PCI_FUNC(tp->pdev->devfn) > 1)
14379 mac_offset += 0x18c;
14380 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14383 /* First try to get it from MAC address mailbox. */
14384 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
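/* 0x484b is ASCII "HK"; the bootcode appears to use it as a signature
 * meaning a valid MAC address follows in the mailbox.
 */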
14385 if ((hi >> 16) == 0x484b) {
14386 dev->dev_addr[0] = (hi >> 8) & 0xff;
14387 dev->dev_addr[1] = (hi >> 0) & 0xff;
14389 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14390 dev->dev_addr[2] = (lo >> 24) & 0xff;
14391 dev->dev_addr[3] = (lo >> 16) & 0xff;
14392 dev->dev_addr[4] = (lo >> 8) & 0xff;
14393 dev->dev_addr[5] = (lo >> 0) & 0xff;
14395 /* Some old bootcode may report a 0 MAC address in SRAM */
14396 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14399 /* Next, try NVRAM. */
14400 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
14401 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14402 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14403 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14404 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14406 /* Finally just fetch it out of the MAC control regs. */
14408 hi = tr32(MAC_ADDR_0_HIGH);
14409 lo = tr32(MAC_ADDR_0_LOW);
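/* MAC_ADDR_0_HIGH holds the first two octets and MAC_ADDR_0_LOW the
 * remaining four, most-significant octet in the highest byte.
 */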
14411 dev->dev_addr[5] = lo & 0xff;
14412 dev->dev_addr[4] = (lo >> 8) & 0xff;
14413 dev->dev_addr[3] = (lo >> 16) & 0xff;
14414 dev->dev_addr[2] = (lo >> 24) & 0xff;
14415 dev->dev_addr[1] = hi & 0xff;
14416 dev->dev_addr[0] = (hi >> 8) & 0xff;
14420 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14421 #ifdef CONFIG_SPARC
14422 if (!tg3_get_default_macaddr_sparc(tp))
14427 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14431 #define BOUNDARY_SINGLE_CACHELINE 1
14432 #define BOUNDARY_MULTI_CACHELINE 2
14434 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14436 int cacheline_size;
14440 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14442 cacheline_size = 1024;
14444 cacheline_size = (int) byte * 4;
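/* PCI_CACHE_LINE_SIZE counts 32-bit words, hence the conversion to bytes. */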
14446 /* On 5703 and later chips, the boundary bits have no
14449 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14450 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14451 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
14454 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14455 goal = BOUNDARY_MULTI_CACHELINE;
14457 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14458 goal = BOUNDARY_SINGLE_CACHELINE;
14464 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
14465 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14472 /* PCI controllers on most RISC systems tend to disconnect
14473 * when a device tries to burst across a cache-line boundary.
14474 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14476 * Unfortunately, for PCI-E there are only limited
14477 * write-side controls for this, and thus for reads
14478 * we will still get the disconnects. We'll also waste
14479 * these PCI cycles for both read and write for chips
14480 * other than 5700 and 5701, which do not implement the boundary bits.
14483 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
14484 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
14485 switch (cacheline_size) {
14490 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14491 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14492 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14494 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14495 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14500 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14501 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14505 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14506 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14509 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14510 switch (cacheline_size) {
14514 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14515 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14516 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14522 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14523 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14527 switch (cacheline_size) {
14529 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14530 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14531 DMA_RWCTRL_WRITE_BNDRY_16);
14536 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14537 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14538 DMA_RWCTRL_WRITE_BNDRY_32);
14543 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14544 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14545 DMA_RWCTRL_WRITE_BNDRY_64);
14550 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14551 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14552 DMA_RWCTRL_WRITE_BNDRY_128);
14557 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14558 DMA_RWCTRL_WRITE_BNDRY_256);
14561 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14562 DMA_RWCTRL_WRITE_BNDRY_512);
14566 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14567 DMA_RWCTRL_WRITE_BNDRY_1024);
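/* Worked example: a 64-byte cache line on plain PCI with goal ==
 * BOUNDARY_SINGLE_CACHELINE selects DMA_RWCTRL_READ_BNDRY_64 |
 * DMA_RWCTRL_WRITE_BNDRY_64 in the switch above.
 */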
14576 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14578 struct tg3_internal_buffer_desc test_desc;
14579 u32 sram_dma_descs;
14582 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14584 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14585 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14586 tw32(RDMAC_STATUS, 0);
14587 tw32(WDMAC_STATUS, 0);
14589 tw32(BUFMGR_MODE, 0);
14590 tw32(FTQ_RESET, 0);
14592 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14593 test_desc.addr_lo = buf_dma & 0xffffffff;
14594 test_desc.nic_mbuf = 0x00002100;
14595 test_desc.len = size;
14598 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14599 * the *second* time the tg3 driver was getting loaded after an unload.
14602 * Broadcom tells me:
14603 * ...the DMA engine is connected to the GRC block and a DMA
14604 * reset may affect the GRC block in some unpredictable way...
14605 * The behavior of resets to individual blocks has not been tested.
14607 * Broadcom noted the GRC reset will also reset all sub-components.
14610 test_desc.cqid_sqid = (13 << 8) | 2;
14612 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14615 test_desc.cqid_sqid = (16 << 8) | 7;
14617 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14620 test_desc.flags = 0x00000005;
14622 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14625 val = *(((u32 *)&test_desc) + i);
14626 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14627 sram_dma_descs + (i * sizeof(u32)));
14628 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14630 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14633 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14635 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14638 for (i = 0; i < 40; i++) {
14642 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14644 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14645 if ((val & 0xffff) == sram_dma_descs) {
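/* Illustrative usage (see tg3_test_dma() below): the helper is called as
 * a pair -- once with to_device == 1 to DMA the buffer to the card, then
 * with to_device == 0 to DMA it back for comparison.
 */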
14656 #define TEST_BUFFER_SIZE 0x2000
14658 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14659 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14663 static int __devinit tg3_test_dma(struct tg3 *tp)
14665 dma_addr_t buf_dma;
14666 u32 *buf, saved_dma_rwctrl;
14669 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14670 &buf_dma, GFP_KERNEL);
14676 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14677 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14679 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14681 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS)
14684 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14685 /* DMA read watermark not used on PCIE */
14686 tp->dma_rwctrl |= 0x00180000;
14687 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
14688 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14689 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14690 tp->dma_rwctrl |= 0x003f0000;
14692 tp->dma_rwctrl |= 0x003f000f;
14694 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14695 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14696 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14697 u32 read_water = 0x7;
14699 /* If the 5704 is behind the EPB bridge, we can
14700 * do the less restrictive ONE_DMA workaround for
14701 * better performance.
14703 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
14704 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14705 tp->dma_rwctrl |= 0x8000;
14706 else if (ccval == 0x6 || ccval == 0x7)
14707 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14709 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14711 /* Set bit 23 to enable PCIX hw bug fix */
14713 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14714 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14716 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14717 /* 5780 always in PCIX mode */
14718 tp->dma_rwctrl |= 0x00144000;
14719 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14720 /* 5714 always in PCIX mode */
14721 tp->dma_rwctrl |= 0x00148000;
14723 tp->dma_rwctrl |= 0x001b000f;
14727 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14728 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14729 tp->dma_rwctrl &= 0xfffffff0;
14731 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14732 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14733 /* Remove this if it causes problems for some boards. */
14734 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14736 /* On 5700/5701 chips, we need to set this bit.
14737 * Otherwise the chip will issue cacheline transactions
14738 * to streamable DMA memory without all of the byte
14739 * enables turned on. This is an error on several
14740 * RISC PCI controllers, in particular sparc64.
14742 * On 5703/5704 chips, this bit has been reassigned
14743 * a different meaning. In particular, it is used
14744 * on those chips to enable a PCI-X workaround.
14746 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14749 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14752 /* Unneeded, already done by tg3_get_invariants. */
14753 tg3_switch_clocks(tp);
14756 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14757 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14760 /* It is best to perform the DMA test with the maximum write burst size
14761 * to expose the 5700/5701 write DMA bug.
14763 saved_dma_rwctrl = tp->dma_rwctrl;
14764 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14765 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14770 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) p[i] = i;
14773 /* Send the buffer to the chip. */
14774 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14776 dev_err(&tp->pdev->dev,
14777 "%s: Buffer write failed. err = %d\n",
14783 /* validate data reached card RAM correctly. */
14784 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14786 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14787 if (le32_to_cpu(val) != p[i]) {
14788 dev_err(&tp->pdev->dev,
14789 "%s: Buffer corrupted on device! "
14790 "(%d != %d)\n", __func__, val, i);
14791 /* ret = -ENODEV here? */
14796 /* Now read it back. */
14797 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14799 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14800 "err = %d\n", __func__, ret);
14805 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14809 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14810 DMA_RWCTRL_WRITE_BNDRY_16) {
14811 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14812 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14813 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14816 dev_err(&tp->pdev->dev,
14817 "%s: Buffer corrupted on read back! "
14818 "(%d != %d)\n", __func__, p[i], i);
14824 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14830 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14831 DMA_RWCTRL_WRITE_BNDRY_16) {
14833 /* DMA test passed without adjusting DMA boundary,
14834 * now look for chipsets that are known to expose the
14835 * DMA bug without failing the test.
14837 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14838 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14839 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14841 /* Safe to use the calculated DMA boundary. */
14842 tp->dma_rwctrl = saved_dma_rwctrl;
14845 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14849 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14854 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14856 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
14857 tp->bufmgr_config.mbuf_read_dma_low_water =
14858 DEFAULT_MB_RDMA_LOW_WATER_5705;
14859 tp->bufmgr_config.mbuf_mac_rx_low_water =
14860 DEFAULT_MB_MACRX_LOW_WATER_57765;
14861 tp->bufmgr_config.mbuf_high_water =
14862 DEFAULT_MB_HIGH_WATER_57765;
14864 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14865 DEFAULT_MB_RDMA_LOW_WATER_5705;
14866 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14867 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14868 tp->bufmgr_config.mbuf_high_water_jumbo =
14869 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14870 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14871 tp->bufmgr_config.mbuf_read_dma_low_water =
14872 DEFAULT_MB_RDMA_LOW_WATER_5705;
14873 tp->bufmgr_config.mbuf_mac_rx_low_water =
14874 DEFAULT_MB_MACRX_LOW_WATER_5705;
14875 tp->bufmgr_config.mbuf_high_water =
14876 DEFAULT_MB_HIGH_WATER_5705;
14877 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14878 tp->bufmgr_config.mbuf_mac_rx_low_water =
14879 DEFAULT_MB_MACRX_LOW_WATER_5906;
14880 tp->bufmgr_config.mbuf_high_water =
14881 DEFAULT_MB_HIGH_WATER_5906;
14884 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14885 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14886 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14887 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14888 tp->bufmgr_config.mbuf_high_water_jumbo =
14889 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14891 tp->bufmgr_config.mbuf_read_dma_low_water =
14892 DEFAULT_MB_RDMA_LOW_WATER;
14893 tp->bufmgr_config.mbuf_mac_rx_low_water =
14894 DEFAULT_MB_MACRX_LOW_WATER;
14895 tp->bufmgr_config.mbuf_high_water =
14896 DEFAULT_MB_HIGH_WATER;
14898 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14899 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14900 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14901 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14902 tp->bufmgr_config.mbuf_high_water_jumbo =
14903 DEFAULT_MB_HIGH_WATER_JUMBO;
14906 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14907 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14910 static char * __devinit tg3_phy_string(struct tg3 *tp)
14912 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14913 case TG3_PHY_ID_BCM5400: return "5400";
14914 case TG3_PHY_ID_BCM5401: return "5401";
14915 case TG3_PHY_ID_BCM5411: return "5411";
14916 case TG3_PHY_ID_BCM5701: return "5701";
14917 case TG3_PHY_ID_BCM5703: return "5703";
14918 case TG3_PHY_ID_BCM5704: return "5704";
14919 case TG3_PHY_ID_BCM5705: return "5705";
14920 case TG3_PHY_ID_BCM5750: return "5750";
14921 case TG3_PHY_ID_BCM5752: return "5752";
14922 case TG3_PHY_ID_BCM5714: return "5714";
14923 case TG3_PHY_ID_BCM5780: return "5780";
14924 case TG3_PHY_ID_BCM5755: return "5755";
14925 case TG3_PHY_ID_BCM5787: return "5787";
14926 case TG3_PHY_ID_BCM5784: return "5784";
14927 case TG3_PHY_ID_BCM5756: return "5722/5756";
14928 case TG3_PHY_ID_BCM5906: return "5906";
14929 case TG3_PHY_ID_BCM5761: return "5761";
14930 case TG3_PHY_ID_BCM5718C: return "5718C";
14931 case TG3_PHY_ID_BCM5718S: return "5718S";
14932 case TG3_PHY_ID_BCM57765: return "57765";
14933 case TG3_PHY_ID_BCM5719C: return "5719C";
14934 case TG3_PHY_ID_BCM5720C: return "5720C";
14935 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14936 case 0: return "serdes";
14937 default: return "unknown";
14941 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14943 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14944 strcpy(str, "PCI Express");
14946 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
14947 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14949 strcpy(str, "PCIX:");
14951 if ((clock_ctrl == 7) ||
14952 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14953 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14954 strcat(str, "133MHz");
14955 else if (clock_ctrl == 0)
14956 strcat(str, "33MHz");
14957 else if (clock_ctrl == 2)
14958 strcat(str, "50MHz");
14959 else if (clock_ctrl == 4)
14960 strcat(str, "66MHz");
14961 else if (clock_ctrl == 6)
14962 strcat(str, "100MHz");
14964 strcpy(str, "PCI:");
14965 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
14966 strcat(str, "66MHz");
14968 strcat(str, "33MHz");
14970 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
14971 strcat(str, ":32-bit");
14973 strcat(str, ":64-bit");
14977 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14979 struct pci_dev *peer;
14980 unsigned int func, devnr = tp->pdev->devfn & ~7;
14982 for (func = 0; func < 8; func++) {
14983 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14984 if (peer && peer != tp->pdev)
14988 /* 5704 can be configured in single-port mode, set peer to
14989 * tp->pdev in that case.
14997 * We don't need to keep the refcount elevated; there's no way
14998 * to remove one half of this device without removing the other.
15005 static void __devinit tg3_init_coal(struct tg3 *tp)
15007 struct ethtool_coalesce *ec = &tp->coal;
15009 memset(ec, 0, sizeof(*ec));
15010 ec->cmd = ETHTOOL_GCOALESCE;
15011 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15012 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15013 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15014 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15015 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15016 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15017 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15018 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15019 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15021 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15022 HOSTCC_MODE_CLRTICK_TXBD)) {
15023 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15024 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15025 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15026 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15029 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
15030 ec->rx_coalesce_usecs_irq = 0;
15031 ec->tx_coalesce_usecs_irq = 0;
15032 ec->stats_block_coalesce_usecs = 0;
15036 static const struct net_device_ops tg3_netdev_ops = {
15037 .ndo_open = tg3_open,
15038 .ndo_stop = tg3_close,
15039 .ndo_start_xmit = tg3_start_xmit,
15040 .ndo_get_stats64 = tg3_get_stats64,
15041 .ndo_validate_addr = eth_validate_addr,
15042 .ndo_set_multicast_list = tg3_set_rx_mode,
15043 .ndo_set_mac_address = tg3_set_mac_addr,
15044 .ndo_do_ioctl = tg3_ioctl,
15045 .ndo_tx_timeout = tg3_tx_timeout,
15046 .ndo_change_mtu = tg3_change_mtu,
15047 .ndo_fix_features = tg3_fix_features,
15048 #ifdef CONFIG_NET_POLL_CONTROLLER
15049 .ndo_poll_controller = tg3_poll_controller,
15053 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
15054 .ndo_open = tg3_open,
15055 .ndo_stop = tg3_close,
15056 .ndo_start_xmit = tg3_start_xmit_dma_bug,
15057 .ndo_get_stats64 = tg3_get_stats64,
15058 .ndo_validate_addr = eth_validate_addr,
15059 .ndo_set_multicast_list = tg3_set_rx_mode,
15060 .ndo_set_mac_address = tg3_set_mac_addr,
15061 .ndo_do_ioctl = tg3_ioctl,
15062 .ndo_tx_timeout = tg3_tx_timeout,
15063 .ndo_change_mtu = tg3_change_mtu,
15064 #ifdef CONFIG_NET_POLL_CONTROLLER
15065 .ndo_poll_controller = tg3_poll_controller,
15069 static int __devinit tg3_init_one(struct pci_dev *pdev,
15070 const struct pci_device_id *ent)
15072 struct net_device *dev;
15074 int i, err, pm_cap;
15075 u32 sndmbx, rcvmbx, intmbx;
15077 u64 dma_mask, persist_dma_mask;
15078 u32 hw_features = 0;
15080 printk_once(KERN_INFO "%s\n", version);
15082 err = pci_enable_device(pdev);
15084 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15088 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15090 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15091 goto err_out_disable_pdev;
15094 pci_set_master(pdev);
15096 /* Find power-management capability. */
15097 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15099 dev_err(&pdev->dev,
15100 "Cannot find Power Management capability, aborting\n");
15102 goto err_out_free_res;
15105 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15107 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15109 goto err_out_free_res;
15112 SET_NETDEV_DEV(dev, &pdev->dev);
15114 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15116 tp = netdev_priv(dev);
15119 tp->pm_cap = pm_cap;
15120 tp->rx_mode = TG3_DEF_RX_MODE;
15121 tp->tx_mode = TG3_DEF_TX_MODE;
15124 tp->msg_enable = tg3_debug;
15126 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15128 /* The word/byte swap controls here control register access byte
15129 * swapping. DMA data byte swapping is controlled in the GRC_MODE setting below.
15132 tp->misc_host_ctrl =
15133 MISC_HOST_CTRL_MASK_PCI_INT |
15134 MISC_HOST_CTRL_WORD_SWAP |
15135 MISC_HOST_CTRL_INDIR_ACCESS |
15136 MISC_HOST_CTRL_PCISTATE_RW;
15138 /* The NONFRM (non-frame) byte/word swap controls take effect
15139 * on descriptor entries, anything which isn't packet data.
15141 * The StrongARM chips on the board (one for tx, one for rx)
15142 * are running in big-endian mode.
15144 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15145 GRC_MODE_WSWAP_NONFRM_DATA);
15146 #ifdef __BIG_ENDIAN
15147 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15149 spin_lock_init(&tp->lock);
15150 spin_lock_init(&tp->indirect_lock);
15151 INIT_WORK(&tp->reset_task, tg3_reset_task);
15153 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15155 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15157 goto err_out_free_dev;
15160 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15161 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15163 dev->ethtool_ops = &tg3_ethtool_ops;
15164 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15165 dev->irq = pdev->irq;
15167 err = tg3_get_invariants(tp);
15169 dev_err(&pdev->dev,
15170 "Problem fetching invariants of chip, aborting\n");
15171 goto err_out_iounmap;
15174 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
15175 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
15176 dev->netdev_ops = &tg3_netdev_ops;
15178 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
15181 /* The EPB bridge inside 5714, 5715, and 5780 and any
15182 * device behind the EPB cannot support DMA addresses > 40-bit.
15183 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15184 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15185 * do DMA address check in tg3_start_xmit().
15187 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
15188 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15189 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
15190 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15191 #ifdef CONFIG_HIGHMEM
15192 dma_mask = DMA_BIT_MASK(64);
15195 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15197 /* Configure DMA attributes. */
15198 if (dma_mask > DMA_BIT_MASK(32)) {
15199 err = pci_set_dma_mask(pdev, dma_mask);
15201 dev->features |= NETIF_F_HIGHDMA;
15202 err = pci_set_consistent_dma_mask(pdev,
15205 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15206 "DMA for consistent allocations\n");
15207 goto err_out_iounmap;
15211 if (err || dma_mask == DMA_BIT_MASK(32)) {
15212 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15214 dev_err(&pdev->dev,
15215 "No usable DMA configuration, aborting\n");
15216 goto err_out_iounmap;
15220 tg3_init_bufmgr_config(tp);
15222 /* Selectively allow TSO based on operating conditions */
15223 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
15224 (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
15225 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
15227 tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
15228 tp->fw_needed = NULL;
15231 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
15232 tp->fw_needed = FIRMWARE_TG3;
15234 /* TSO is on by default on chips that support hardware TSO.
15235 * Firmware TSO on older chips gives lower performance, so it
15236 * is off by default, but can be enabled using ethtool.
15238 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
15239 (dev->features & NETIF_F_IP_CSUM))
15240 hw_features |= NETIF_F_TSO;
15241 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
15242 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
15243 if (dev->features & NETIF_F_IPV6_CSUM)
15244 hw_features |= NETIF_F_TSO6;
15245 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
15246 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15247 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15248 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15249 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15250 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15251 hw_features |= NETIF_F_TSO_ECN;
15254 dev->hw_features |= hw_features;
15255 dev->features |= hw_features;
15256 dev->vlan_features |= hw_features;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}
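
	/* On 5705 A1 parts without TSO on a slow bus, the flag name suggests
	 * the standard RX ring is limited to 64 pending entries, hence the
	 * clamp to 63 above.
	 */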
	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_iounmap;
	}
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);

		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
			tg3_read_dash_ver(tp);
	}
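
	/* The APE (Application Processing Engine) is the on-chip management
	 * processor used for ASF/DASH remote management; its registers live
	 * in BAR 2, and the lock init arbitrates resources shared between
	 * the host driver and that firmware.
	 */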
	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut down
	 * DMA cleanly; otherwise the DMA self test would enable the WDMAC
	 * and we'd see (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}
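
	/* tr32()/tw32() are this driver's MMIO register accessors; a set
	 * ENABLE bit in either the host-coalescing or write-DMA mode
	 * register means a pre-OS driver left the engine running.
	 */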
	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i < 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;
		if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful in single-vector mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;
		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
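
	/* Concretely: with e.g. five MSI-X vectors, vector 0 fields only
	 * link-change events, while vectors 1-4 service the rings, each
	 * picking up mailbox addresses advanced by the strides above.
	 */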
	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}
	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}
	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
		    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
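
	/* The final ternary just decodes the streaming DMA mask negotiated
	 * earlier back into a human-readable width (32, 40, or 64 bits).
	 */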
	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		cancel_work_sync(&tp->reset_task);

		if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
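
/* The power management callbacks below plug into the dev_pm_ops
 * interface: the PM core invokes them with the generic struct device,
 * hence the to_pci_dev() conversions at the top of each handler.
 */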
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	flush_work_sync(&tp->reset_task);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);
	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		/* Power-down prep failed: restart the hardware so the
		 * device keeps working, and return the error so the PM
		 * core aborts the suspend.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
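
/* SIMPLE_DEV_PM_OPS expands to a struct dev_pm_ops that reuses the same
 * pair of callbacks for suspend/resume and for the hibernation
 * transitions (freeze/thaw, poweroff/restore).
 */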
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.driver.pm	= TG3_PM_OPS,
};
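
/* __devexit_p() yields the function pointer when hot-unplug support is
 * compiled in and NULL otherwise, letting __devexit-marked code be
 * discarded on configurations that can never remove the device.
 */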
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);