// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define FORCEDETH_VERSION		"0.64"
#define DRV_NAME			"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/prefetch.h>
#include <linux/u64_stats_sync.h>

#define TX_WORK_PER_LOOP	64
#define RX_WORK_PER_LOOP	64

#define DEV_NEED_TIMERIRQ	0x0000001  /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x0000002  /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x0000004  /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x0000008  /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x0000010  /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN		0x0000020  /* device supports vlan tagging and striping */
#define DEV_HAS_MSI		0x0000040  /* device supports MSI */
#define DEV_HAS_MSI_X		0x0000080  /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL	0x0000100  /* device supports power savings */
#define DEV_HAS_STATISTICS_V1	0x0000200  /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2	0x0000400  /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3	0x0000800  /* device supports hw statistics version 3 */
#define DEV_HAS_STATISTICS_V12	0x0000600  /* device supports hw statistics version 1 and 2 */
#define DEV_HAS_STATISTICS_V123	0x0000e00  /* device supports hw statistics version 1, 2, and 3 */
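/* The combined masks above are simply the OR of the individual version
 * bits: 0x0000600 == DEV_HAS_STATISTICS_V1 | DEV_HAS_STATISTICS_V2, and
 * 0x0000e00 adds DEV_HAS_STATISTICS_V3, so a single
 * "driver_data & DEV_HAS_STATISTICS_V123" test matches any stats version.
 */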
#define DEV_HAS_TEST_EXTENDED	0x0001000  /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT	0x0002000  /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR	0x0004000  /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX	0x0008000  /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1	0x0010000  /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2	0x0020000  /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3	0x0040000  /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT	0x0080000  /* device needs to limit tx */
#define DEV_NEED_TX_LIMIT2	0x0180000  /* device needs to limit tx, except for some revs */
#define DEV_HAS_GEAR_MODE	0x0200000  /* device supports gear mode */
#define DEV_NEED_PHY_INIT_FIX	0x0400000  /* device needs specific phy workaround */
#define DEV_NEED_LOW_POWER_FIX	0x0800000  /* device needs special power up workaround */
#define DEV_NEED_MSI_FIX	0x1000000  /* device needs msi workaround */

enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK	0x83ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR		0x0001
#define NVREG_IRQ_RX			0x0002
#define NVREG_IRQ_RX_NOBUF		0x0004
#define NVREG_IRQ_TX_ERR		0x0008
#define NVREG_IRQ_TX_OK			0x0010
#define NVREG_IRQ_TIMER			0x0020
#define NVREG_IRQ_LINK			0x0040
#define NVREG_IRQ_RX_FORCED		0x0080
#define NVREG_IRQ_TX_FORCED		0x0100
#define NVREG_IRQ_RECOVER_ERROR		0x8200
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0060
#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL 3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	65535 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
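/* Worked example for the interval formula: the register counts in units
 * of 2^10/100 microseconds, so a value v gives roughly v * 10.24 us
 * between timer irqs; v = 97 is ~993 us (the ~1 ms default noted above),
 * and the CPU-mode default of 13 is ~133 us.
 */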
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START		0x01
#define NVREG_XMITCTL_MGMT_ST		0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
#define NVREG_XMITCTL_DATA_START	0x00100000
#define NVREG_XMITCTL_DATA_READY	0x00010000
#define NVREG_XMITCTL_DATA_ERROR	0x00020000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
#define NVREG_SLOTTIME_10_100_FULL	0x00007f00
#define NVREG_SLOTTIME_1000_FULL	0x0003ff00
#define NVREG_SLOTTIME_HALF		0x0000ff00
#define NVREG_SLOTTIME_DEFAULT		0x00007f00
#define NVREG_SLOTTIME_MASK		0x000000ff

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT		0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100		0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000		0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH		0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE	0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE	0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT		0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK	0x000003ff
#define NVREG_BKOFFCTRL_SELECT		24
#define NVREG_BKOFFCTRL_GEAR		12

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE	0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP	0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
	NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE	0x00010000
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK_RW		0x0007
#define NVREG_MIISTAT_MASK_ALL		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegTxUnicast = 0x1a0,
	NvRegTxMulticast = 0x1a4,
	NvRegTxBroadcast = 0x1a8,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION	0x01
	NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION		0x08
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE	0x20000
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
#define NVREG_POWERSTATE2_PHY_RESET		0x0004
#define NVREG_POWERSTATE2_GATE_CLOCKS		0x0F00
};

/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
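/* Example of how these masks split a descriptor's 32-bit flaglen word:
 * with FLAG_MASK_V1 the upper 16 bits hold the NV_TX/NV_RX flag bits and
 * the lower 16 bits the buffer length; v2/v3 descriptors widen the flag
 * field to 18 bits, leaving a 14-bit length, hence 0xffffc000.
 */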

#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_RETRYCOUNT_MASK	(0xF<<20)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_RETRYCOUNT_MASK	(0xF<<19)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
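/* For TSO the MSS is folded into the same flaglen word: the transmit path
 * (not shown in this excerpt) ORs NV_TX2_TSO with
 * (gso_size << NV_TX2_TSO_SHIFT), which is why the largest expressible
 * segment is NV_TX2_TSO_MAX_SIZE, i.e. 1 << 14 = 16384 bytes.
 */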
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT	(1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBTRACT1		(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)
#define NV_RX_ERROR_MASK	(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUM_IP	(0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)
#define NV_RX2_ERROR_MASK	(NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT	(1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604
#define NV_PCI_REGSZ_MAX	0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		512
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)

/*
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3

#define PHY_OUI_MARVELL		0x5043
#define PHY_OUI_CICADA		0x03f1
#define PHY_OUI_VITESSE		0x01c1
#define PHY_OUI_REALTEK		0x0732
#define PHY_OUI_REALTEK2	0x0020
#define PHYID1_OUI_MASK		0x03ff
#define PHYID1_OUI_SHFT		6
#define PHYID2_OUI_MASK		0xfc00
#define PHYID2_OUI_SHFT		10
#define PHYID2_MODEL_MASK	0x03f0
#define PHY_MODEL_REALTEK_8211		0x0110
#define PHY_REV_MASK			0x0001
#define PHY_REV_REALTEK_8211B		0x0000
#define PHY_REV_REALTEK_8211C		0x0001
#define PHY_MODEL_REALTEK_8201		0x0200
#define PHY_MODEL_MARVELL_E3016		0x0220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT_REG4	0x14
#define PHY_REALTEK_INIT_REG5	0x18
#define PHY_REALTEK_INIT_REG6	0x11
#define PHY_REALTEK_INIT_REG7	0x01
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17
#define PHY_REALTEK_INIT5	0xfb54
#define PHY_REALTEK_INIT6	0xf5c7
#define PHY_REALTEK_INIT7	0x1000
#define PHY_REALTEK_INIT8	0x0003
#define PHY_REALTEK_INIT9	0x0008
#define PHY_REALTEK_INIT10	0x0005
#define PHY_REALTEK_INIT11	0x0200
#define PHY_REALTEK_INIT_MSK1	0x0003

#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008
#define NV_PAUSEFRAME_RX_REQ     0x0010
#define NV_PAUSEFRAME_TX_REQ     0x0020
#define NV_PAUSEFRAME_AUTONEG    0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS	8
#define NV_MSI_X_VECTORS_MASK	0x000f
#define NV_MSI_CAPABLE		0x0010
#define NV_MSI_X_CAPABLE	0x0020
#define NV_MSI_ENABLED		0x0040
#define NV_MSI_X_ENABLED	0x0080

#define NV_MSI_X_VECTOR_ALL	0x0
#define NV_MSI_X_VECTOR_RX	0x0
#define NV_MSI_X_VECTOR_TX	0x1
#define NV_MSI_X_VECTOR_OTHER	0x2

#define NV_MSI_PRIV_OFFSET	0x68
#define NV_MSI_PRIV_VALUE	0xffffffff

#define NV_RESTART_TX		0x1
#define NV_RESTART_RX		0x2

#define NV_TX_LIMIT_COUNT	16

#define NV_DYNAMIC_THRESHOLD		4
#define NV_DYNAMIC_MAX_QUIET_COUNT	2048

struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" }, /* includes Ethernet FCS CRC */
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_late_collision" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "rx_bytes" }, /* includes Ethernet FCS CRC */

	/* version 3 stats */
	{ "tx_unicast" },
	{ "tx_multicast" },
	{ "tx_broadcast" }
};

struct nv_ethtool_stats {
	u64 tx_bytes; /* should be ifconfig->tx_bytes + 4*tx_packets */
	u64 tx_late_collision;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 rx_late_collision;
	u64 rx_frame_too_long;
	u64 rx_frame_align_error;
	u64 rx_packets; /* should be ifconfig->rx_packets */

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets; /* should be ifconfig->tx_packets */
	u64 rx_bytes;   /* should be ifconfig->rx_bytes + 4*rx_packets */

	/* version 3 stats */
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
};

#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
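/* A worked check of the counts above: V3_COUNT is the total number of u64
 * fields in struct nv_ethtool_stats; V2 hardware lacks the three v3-only
 * counters (tx_unicast, tx_multicast, tx_broadcast), so V2_COUNT is
 * V3_COUNT - 3, and V1 hardware additionally lacks six v2-only counters,
 * hence V1_COUNT = V2_COUNT - 6.
 */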

#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link      (online/offline)" },
	{ "register  (offline)       " },
	{ "interrupt (offline)       " },
	{ "loopback  (offline)       " }
};

struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len:31;
	unsigned int dma_single:1;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};

/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *   by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *   needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 *
 * Hardware stats updates are protected by hwstats_lock:
 * - updated by nv_do_stats_poll (timer). This is meant to avoid
 *   integer wraparound in the NIC stats registers, at low frequency
 *   (0.1 Hz)
 * - updated by nv_get_ethtool_stats + nv_get_stats64
 *
 * Software stats are accessed only through 64b synchronization points
 * and are not subject to other synchronization techniques (single
 * update thread on the TX or RX paths).
 */

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* hardware stats are updated in syscall and timer */
	spinlock_t hwstats_lock;
	struct nv_ethtool_stats estats;

	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;

	/* RX software stats */
	struct u64_stats_sync swstats_rx_syncp;
	u64 stat_rx_packets;
	u64 stat_rx_bytes; /* not always available in HW */
	u64 stat_rx_missed_errors;
	u64 stat_rx_dropped;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;

	/* TX software stats */
	struct u64_stats_sync swstats_tx_syncp;
	u64 stat_tx_packets; /* not always available in HW */
	u64 stat_tx_bytes;
	u64 stat_tx_dropped;

	/* msi/msi-x fields */
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];

	/* for different msi-x irq type */
	char name_rx[IFNAMSIZ + 3];	/* -rx    */
	char name_tx[IFNAMSIZ + 3];	/* -tx    */
	char name_other[IFNAMSIZ + 6];	/* -other */
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 4;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU,
	NV_OPTIMIZATION_MODE_DYNAMIC
};
static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * Its value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;

static int msi = NV_MSI_INT_ENABLED;

enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

/*
 * Debug output control for tx_timeout
 */
static bool debug_tx_timeout = false;

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;

/*
 * Power down phy when interface is down (persists through reboot;
 * older Linux and other OSes may not power it up again)
 */
static int phy_power_down;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;

	return true;
}

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0)
			return 1;
	} while ((readl(base + offset) & mask) != target);

	return 0;
}

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}
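/* Why the double shift in dma_high(): shifting a 32-bit dma_addr_t right
 * by a full 32 bits in one operation is undefined behaviour in C, while
 * addr>>31>>1 is well defined on both widths: it yields 0 when dma_addr_t
 * is 32 bits and the upper 32 address bits when it is 64 bits.
 */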

static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING)
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		if (rxtx_flags & NV_SETUP_TX_RING)
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}
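/* Note on the address arithmetic above: rx and tx rings live in a single
 * coherent DMA allocation with the rx ring first, so the tx ring's bus
 * address is just ring_addr plus rx_ring_size descriptors.
 */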

static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			dma_free_coherent(&np->pci_dev->dev,
					  sizeof(struct ring_desc) *
					  (np->rx_ring_size +
					   np->tx_ring_size),
					  np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			dma_free_coherent(&np->pci_dev->dev,
					  sizeof(struct ring_desc_ex) *
					  (np->rx_ring_size +
					   np->tx_ring_size),
					  np->rx_ring.ex, np->ring_addr);
	}
	kfree(np->rx_skb);
	kfree(np->tx_skb);
}

static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}
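/* In other words: rx, tx and "other" events only arrive on separate irq
 * lines when MSI-X is enabled with more than one vector; legacy INTx,
 * plain MSI and single-vector MSI-X all share one handler.
 */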

static void nv_txrx_gate(struct net_device *dev, bool gate)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate;

	if (!np->mac_in_use &&
	    (np->driver_data & DEV_HAS_POWER_CNTRL)) {
		powerstate = readl(base + NvRegPowerState2);
		if (gate)
			powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
		else
			powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
		writel(powerstate, base + NvRegPowerState2);
	}
}

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}

static void nv_napi_enable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_enable(&np->napi);
}

static void nv_napi_disable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_disable(&np->napi);
}

#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
		      NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
	}

	return retval;
}
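/* Usage sketch (illustrative): pass MII_READ as the value to read --
 * the return is the register contents, or -1 on error; writes return 0
 * on success and nonzero on failure. E.g., assuming the caller already
 * holds the required serialization:
 *
 *	int bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
 *	mii_rw(dev, np->phyaddr, MII_BMCR, bmcr | BMCR_ANRESTART);
 */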

static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
		return -1;

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		usleep_range(10000, 20000);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}

static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
{
	static const struct {
		int reg;
		int init;
	} ri[] = {
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
		{ PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
		{ PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
		{ PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
		{ PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ri); i++) {
		if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
			return PHY_ERROR;
	}

	return 0;
}

static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
{
	u32 reg;
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate = readl(base + NvRegPowerState2);

	/* need to perform hw phy reset */
	powerstate |= NVREG_POWERSTATE2_PHY_RESET;
	writel(powerstate, base + NvRegPowerState2);
	msleep(25);

	powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
	writel(powerstate, base + NvRegPowerState2);
	msleep(25);

	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
	reg |= PHY_REALTEK_INIT9;
	if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
		return PHY_ERROR;
	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
	if (!(reg & PHY_REALTEK_INIT11)) {
		reg |= PHY_REALTEK_INIT11;
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
			return PHY_ERROR;
	}
	if (mii_rw(dev, np->phyaddr,
		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
		return PHY_ERROR;

	return 0;
}

static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
		phy_reserved = mii_rw(dev, np->phyaddr,
				      PHY_REALTEK_INIT_REG6, MII_READ);
		phy_reserved |= PHY_REALTEK_INIT7;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG6, phy_reserved))
			return PHY_ERROR;
	}

	return 0;
}

static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
			return PHY_ERROR;
		phy_reserved = mii_rw(dev, np->phyaddr,
				      PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT3;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG2, phy_reserved))
			return PHY_ERROR;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
			return PHY_ERROR;
	}

	return 0;
}

static int init_cicada(struct net_device *dev, struct fe_priv *np,
		       u32 phyinterface)
{
	u32 phy_reserved;

	if (phyinterface & PHY_RGMII) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
			return PHY_ERROR;
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
			return PHY_ERROR;
	}
	phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
	phy_reserved |= PHY_CICADA_INIT6;
	if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
		return PHY_ERROR;

	return 0;
}

static int init_vitesse(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
	phy_reserved |= PHY_VITESSE_INIT3;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
	phy_reserved |= PHY_VITESSE_INIT3;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
	phy_reserved |= PHY_VITESSE_INIT8;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
		return PHY_ERROR;

	return 0;
}

static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface;
	u32 mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			netdev_info(dev, "%s: phy write to errata reg failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			if (init_realtek_8211b(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
			   np->phy_rev == PHY_REV_REALTEK_8211C) {
			if (init_realtek_8211c(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (init_realtek_8201(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
		ADVERTISE_100HALF | ADVERTISE_100FULL |
		ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		netdev_info(dev, "%s: phy write to advertise failed\n",
			    pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr,
					  MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
	    np->phy_rev == PHY_REV_REALTEK_8211C) {
		/* start autoneg since we already performed hw reset above */
		mii_control |= BMCR_ANRESTART;
		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else {
		/* reset the phy
		 * (certain phys need bmcr to be setup with reset)
		 */
		if (phy_reset(dev, mii_control)) {
			netdev_info(dev, "%s: phy reset failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* phy vendor specific configuration */
	if (np->phy_oui == PHY_OUI_CICADA) {
		if (init_cicada(dev, np, phyinterface)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else if (np->phy_oui == PHY_OUI_VITESSE) {
		if (init_vitesse(dev, np)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			/* reset could have cleared these out, set them back */
			if (init_realtek_8211b(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (init_realtek_8201(dev, np) ||
			    init_realtek_8201_cross(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation, power down phy */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (phy_power_down)
		mii_control |= BMCR_PDOWN;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
		return PHY_ERROR;

	return 0;
}

static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		      NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
		netdev_info(dev, "%s: ReceiverStatus remained busy\n",
			    __func__);

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		      NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
		netdev_info(dev, "%s: TransmitterStatus remained busy\n",
			    __func__);

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}

static void nv_start_rxtx(struct net_device *dev)
{
	nv_start_rx(dev);
	nv_start_tx(dev);
}

static void nv_stop_rxtx(struct net_device *dev)
{
	nv_stop_rx(dev);
	nv_stop_tx(dev);
}

static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 temp1, temp2, temp3;

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);

	/* save registers since they will be cleared on reset */
	temp1 = readl(base + NvRegMacAddrA);
	temp2 = readl(base + NvRegMacAddrB);
	temp3 = readl(base + NvRegTransmitPoll);

	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);

	/* restore saved registers */
	writel(temp1, base + NvRegMacAddrA);
	writel(temp2, base + NvRegMacAddrB);
	writel(temp3, base + NvRegTransmitPoll);

	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

/* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */
static void nv_update_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	/* If it happens that this is run in top-half context, then
	 * replace the spin_lock of hwstats_lock with
	 * spin_lock_irqsave() in calling functions. */
	WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half");
	assert_spin_locked(&np->hwstats_lock);

	/* query hardware */
	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
		np->estats.rx_errors_total += np->estats.rx_drop_frame;
	}

	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
	}
}

/*
 * nv_get_stats64: dev->ndo_get_stats64 function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static void
nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
	__acquires(&netdev_priv(dev)->hwstats_lock)
	__releases(&netdev_priv(dev)->hwstats_lock)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int syncp_start;

	/*
	 * Note: because HW stats are not always available and for
	 * consistency reasons, the following ifconfig stats are
	 * managed by software: rx_bytes, tx_bytes, rx_packets and
	 * tx_packets. The related hardware stats reported by ethtool
	 * should be equivalent to these ifconfig stats, with 4
	 * additional bytes per packet (Ethernet FCS CRC), except for
	 * tx_packets when TSO kicks in.
	 */

	/* software stats */
	do {
		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
		storage->rx_packets = np->stat_rx_packets;
		storage->rx_bytes = np->stat_rx_bytes;
		storage->rx_dropped = np->stat_rx_dropped;
		storage->rx_missed_errors = np->stat_rx_missed_errors;
	} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));

	do {
		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
		storage->tx_packets = np->stat_tx_packets;
		storage->tx_bytes = np->stat_tx_bytes;
		storage->tx_dropped = np->stat_tx_dropped;
	} while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
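
	/* The begin/retry pairs above re-read the snapshot if a writer
	 * updated the counters concurrently; that is what keeps these
	 * 64-bit counters consistent on 32-bit hosts without locking the
	 * hot rx/tx paths.
	 */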

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & DEV_HAS_STATISTICS_V123) {
		spin_lock_bh(&np->hwstats_lock);

		nv_update_stats(dev);

		/* generic stats */
		storage->rx_errors = np->estats.rx_errors_total;
		storage->tx_errors = np->estats.tx_errors_total;

		/* meaningful only when NIC supports stats v3 */
		storage->multicast = np->estats.rx_multicast;

		/* detailed rx_errors */
		storage->rx_length_errors = np->estats.rx_length_error;
		storage->rx_over_errors = np->estats.rx_over_errors;
		storage->rx_crc_errors = np->estats.rx_crc_errors;
		storage->rx_frame_errors = np->estats.rx_frame_align_error;
		storage->rx_fifo_errors = np->estats.rx_drop_frame;

		/* detailed tx_errors */
		storage->tx_carrier_errors = np->estats.tx_carrier_errors;
		storage->tx_fifo_errors = np->estats.tx_fifo_errors;

		spin_unlock_bh(&np->hwstats_lock);
	}
}

/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc *less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->rx_ring.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (likely(skb)) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
							     skb->data,
							     skb_tailroom(skb),
							     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&np->pci_dev->dev,
						       np->put_rx_ctx->dma))) {
				kfree_skb(skb);
				goto packet_dropped;
			}
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->rx_ring.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->rx_skb;
		} else {
packet_dropped:
			u64_stats_update_begin(&np->swstats_rx_syncp);
			np->stat_rx_dropped++;
			u64_stats_update_end(&np->swstats_rx_syncp);
			return 1;
		}
	}
	return 0;
}

static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex *less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->rx_ring.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (likely(skb)) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
							     skb->data,
							     skb_tailroom(skb),
							     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&np->pci_dev->dev,
						       np->put_rx_ctx->dma))) {
				kfree_skb(skb);
				goto packet_dropped;
			}
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->rx_ring.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->rx_skb;
		} else {
packet_dropped:
			u64_stats_update_begin(&np->swstats_rx_syncp);
			np->stat_rx_dropped++;
			u64_stats_update_end(&np->swstats_rx_syncp);
			return 1;
		}
	}
	return 0;
}

/* If rx buffers are exhausted, this is called after 50ms to attempt a refill */
static void nv_do_rx_refill(struct timer_list *t)
{
	struct fe_priv *np = from_timer(np, t, oom_kick);

	/* Just reschedule NAPI rx processing */
	napi_schedule(&np->napi);
}

static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_rx = np->rx_ring;
	np->put_rx = np->rx_ring;

	if (!nv_optimized(np))
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->rx_skb;
	np->put_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
	}
}

static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_tx = np->tx_ring;
	np->put_tx = np->tx_ring;

	if (!nv_optimized(np))
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->tx_skb;
	np->put_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
	netdev_reset_queue(np->dev);
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].dma_single = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
}

static int nv_init_ring(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	nv_init_tx(dev);
	nv_init_rx(dev);

	if (!nv_optimized(np))
		return nv_alloc_rx(dev);
	else
		return nv_alloc_rx_optimized(dev);
}

static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{
	if (tx_skb->dma) {
		if (tx_skb->dma_single)
			dma_unmap_single(&np->pci_dev->dev, tx_skb->dma,
					 tx_skb->dma_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(&np->pci_dev->dev, tx_skb->dma,
				       tx_skb->dma_len,
				       DMA_TO_DEVICE);
		tx_skb->dma = 0;
	}
}

static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{
	nv_unmap_txskb(np, tx_skb);

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
		return 1;
	}
	return 0;
}

static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		if (nv_release_txskb(np, &np->tx_skb[i])) {
			u64_stats_update_begin(&np->swstats_tx_syncp);
			np->stat_tx_dropped++;
			u64_stats_update_end(&np->swstats_tx_syncp);
		}
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].dma_single = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;
}
2030 static void nv_drain_rx(struct net_device *dev)
2032 struct fe_priv *np = netdev_priv(dev);
2035 for (i = 0; i < np->rx_ring_size; i++) {
2036 if (!nv_optimized(np)) {
2037 np->rx_ring.orig[i].flaglen = 0;
2038 np->rx_ring.orig[i].buf = 0;
2040 np->rx_ring.ex[i].flaglen = 0;
2041 np->rx_ring.ex[i].txvlan = 0;
2042 np->rx_ring.ex[i].bufhigh = 0;
2043 np->rx_ring.ex[i].buflow = 0;
2046 if (np->rx_skb[i].skb) {
2047 dma_unmap_single(&np->pci_dev->dev, np->rx_skb[i].dma,
2048 (skb_end_pointer(np->rx_skb[i].skb) -
2049 np->rx_skb[i].skb->data),
2051 dev_kfree_skb(np->rx_skb[i].skb);
2052 np->rx_skb[i].skb = NULL;
2057 static void nv_drain_rxtx(struct net_device *dev)
2063 static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
2065 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
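/* Worked example of the ring arithmetic above: with tx_ring_size = 256
 * and put_tx_ctx 10 slots ahead of get_tx_ctx, (256 + 10) % 256 = 10
 * slots are in flight and 256 - 10 = 246 are empty. Adding
 * tx_ring_size before the modulo keeps the dividend positive once
 * put_tx_ctx has wrapped behind get_tx_ctx.
 */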
2068 static void nv_legacybackoff_reseed(struct net_device *dev)
2070 u8 __iomem *base = get_hwbase(dev);
2075 reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
2076 get_random_bytes(&low, sizeof(low));
2077 reg |= low & NVREG_SLOTTIME_MASK;
2079 /* Need to stop tx before change takes effect.
2080 * Caller has already acquired np->lock. */
2082 tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
2086 writel(reg, base + NvRegSlotTime);
2092 /* Gear Backoff Seeds */
2093 #define BACKOFF_SEEDSET_ROWS 8
2094 #define BACKOFF_SEEDSET_LFSRS 15
2096 /* Known Good seed sets */
2097 static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2098 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2099 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
2100 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2101 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
2102 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
2103 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
2104 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
2105 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
2107 static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2108 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2109 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2110 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
2111 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2112 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2113 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2114 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2115 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
2117 static void nv_gear_backoff_reseed(struct net_device *dev)
2119 u8 __iomem *base = get_hwbase(dev);
2120 u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
2121 u32 temp, seedset, combinedSeed;
2124 /* Setup seed for free running LFSR */
2125 /* We draw three 12-bit random values
2126 and swizzle bits around to increase randomness */
2127 get_random_bytes(&miniseed1, sizeof(miniseed1));
2128 miniseed1 &= 0x0fff;
2132 get_random_bytes(&miniseed2, sizeof(miniseed2));
2133 miniseed2 &= 0x0fff;
2136 miniseed2_reversed =
2137 ((miniseed2 & 0xF00) >> 8) |
2138 (miniseed2 & 0x0F0) |
2139 ((miniseed2 & 0x00F) << 8);
2141 get_random_bytes(&miniseed3, sizeof(miniseed3));
2142 miniseed3 &= 0x0fff;
2145 miniseed3_reversed =
2146 ((miniseed3 & 0xF00) >> 8) |
2147 (miniseed3 & 0x0F0) |
2148 ((miniseed3 & 0x00F) << 8);
2150 combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
2151 (miniseed2 ^ miniseed3_reversed);
2153 /* Seeds cannot be zero */
2154 if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
2155 combinedSeed |= 0x08;
2156 if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
2157 combinedSeed |= 0x8000;
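/* Layout sketch (assuming NVREG_BKOFFCTRL_GEAR is the 12-bit shift
 * used below): bits 23..12 of combinedSeed hold
 * miniseed1 ^ miniseed2_reversed and seed the gear half, while bits
 * 11..0 hold miniseed2 ^ miniseed3_reversed and seed the free-running
 * LFSR. The two checks above force each 12-bit half to be nonzero
 * independently, since an LFSR seeded with zero never advances.
 */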
2159 /* No need to disable tx here */
2160 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
2161 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
2162 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
2163 writel(temp, base + NvRegBackOffControl);
2165 /* Setup seeds for all gear LFSRs. */
2166 get_random_bytes(&seedset, sizeof(seedset));
2167 seedset = seedset % BACKOFF_SEEDSET_ROWS;
2168 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
2169 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
2170 temp |= main_seedset[seedset][i-1] & 0x3ff;
2171 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
2172 writel(temp, base + NvRegBackOffControl);
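/* Each pass of the loop programs one gear LFSR: i selects the LFSR
 * via the SELECT field, the main seed occupies the low 10 bits and
 * the gear seed sits NVREG_BKOFFCTRL_GEAR bits higher. A seed-table
 * row was chosen at random above, so two colliding stations are
 * unlikely to walk the same backoff sequence.
 */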
2177 * nv_start_xmit: dev->hard_start_xmit function
2178 * Called with netif_tx_lock held.
2180 static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2182 struct fe_priv *np = netdev_priv(dev);
2184 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
2185 unsigned int fragments = skb_shinfo(skb)->nr_frags;
2189 u32 size = skb_headlen(skb);
2190 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
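/* The expression above is a ceiling division: with NV_TX2_TSO_MAX_SIZE
 * equal to 1 << NV_TX2_TSO_MAX_SHIFT, entries works out to
 * DIV_ROUND_UP(size, NV_TX2_TSO_MAX_SIZE), i.e. one descriptor per
 * maximally-sized chunk of the linear buffer.
 */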
2192 struct ring_desc *put_tx;
2193 struct ring_desc *start_tx;
2194 struct ring_desc *prev_tx;
2195 struct nv_skb_map *prev_tx_ctx;
2196 struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
2197 unsigned long flags;
2199 /* add fragments to entries count */
2200 for (i = 0; i < fragments; i++) {
2201 u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2203 entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2204 ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2207 spin_lock_irqsave(&np->lock, flags);
2208 empty_slots = nv_get_empty_tx_slots(np);
2209 if (unlikely(empty_slots <= entries)) {
2210 netif_stop_queue(dev);
2212 spin_unlock_irqrestore(&np->lock, flags);
2213 return NETDEV_TX_BUSY;
2215 spin_unlock_irqrestore(&np->lock, flags);
2217 start_tx = put_tx = np->put_tx.orig;
2219 /* setup the header buffer */
2221 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2222 np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
2223 skb->data + offset, bcnt,
2225 if (unlikely(dma_mapping_error(&np->pci_dev->dev,
2226 np->put_tx_ctx->dma))) {
2227 /* on DMA mapping error - drop the packet */
2228 dev_kfree_skb_any(skb);
2229 u64_stats_update_begin(&np->swstats_tx_syncp);
2230 np->stat_tx_dropped++;
2231 u64_stats_update_end(&np->swstats_tx_syncp);
2232 return NETDEV_TX_OK;
2234 np->put_tx_ctx->dma_len = bcnt;
2235 np->put_tx_ctx->dma_single = 1;
2236 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2237 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2239 tx_flags = np->tx_flags;
2242 if (unlikely(put_tx++ == np->last_tx.orig))
2243 put_tx = np->tx_ring.orig;
2244 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2245 np->put_tx_ctx = np->tx_skb;
2248 /* setup the fragments */
2249 for (i = 0; i < fragments; i++) {
2250 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2251 u32 frag_size = skb_frag_size(frag);
2256 start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
2258 bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2259 np->put_tx_ctx->dma = skb_frag_dma_map(
2264 if (unlikely(dma_mapping_error(&np->pci_dev->dev,
2265 np->put_tx_ctx->dma))) {
2267 /* Unwind the mapped fragments */
2269 nv_unmap_txskb(np, start_tx_ctx);
2270 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
2271 tmp_tx_ctx = np->tx_skb;
2272 } while (tmp_tx_ctx != np->put_tx_ctx);
2273 dev_kfree_skb_any(skb);
2274 np->put_tx_ctx = start_tx_ctx;
2275 u64_stats_update_begin(&np->swstats_tx_syncp);
2276 np->stat_tx_dropped++;
2277 u64_stats_update_end(&np->swstats_tx_syncp);
2278 return NETDEV_TX_OK;
2281 np->put_tx_ctx->dma_len = bcnt;
2282 np->put_tx_ctx->dma_single = 0;
2283 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2284 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2288 if (unlikely(put_tx++ == np->last_tx.orig))
2289 put_tx = np->tx_ring.orig;
2290 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2291 np->put_tx_ctx = np->tx_skb;
2292 } while (frag_size);
2295 if (unlikely(put_tx == np->tx_ring.orig))
2296 prev_tx = np->last_tx.orig;
2298 prev_tx = put_tx - 1;
2300 if (unlikely(np->put_tx_ctx == np->tx_skb))
2301 prev_tx_ctx = np->last_tx_ctx;
2303 prev_tx_ctx = np->put_tx_ctx - 1;
2305 /* set last fragment flag */
2306 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
2308 /* save skb in this slot's context area */
2309 prev_tx_ctx->skb = skb;
2311 if (skb_is_gso(skb))
2312 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2314 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2315 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2317 spin_lock_irqsave(&np->lock, flags);
2320 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2322 netdev_sent_queue(np->dev, skb->len);
2324 skb_tx_timestamp(skb);
2326 np->put_tx.orig = put_tx;
2328 spin_unlock_irqrestore(&np->lock, flags);
2330 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
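/* Note on ordering (a sketch; the tx_flags initializer is elided
 * above but starts at zero): the first descriptor was written without
 * the VALID bit, and start_tx->flaglen is or-ed with VALID and the
 * extra flags under np->lock just before this point, so the NIC can
 * never observe a partially built chain. The kick write merely asks
 * the hardware to rescan the ring.
 */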
2331 return NETDEV_TX_OK;
2334 static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2335 struct net_device *dev)
2337 struct fe_priv *np = netdev_priv(dev);
2340 unsigned int fragments = skb_shinfo(skb)->nr_frags;
2344 u32 size = skb_headlen(skb);
2345 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2347 struct ring_desc_ex *put_tx;
2348 struct ring_desc_ex *start_tx;
2349 struct ring_desc_ex *prev_tx;
2350 struct nv_skb_map *prev_tx_ctx;
2351 struct nv_skb_map *start_tx_ctx = NULL;
2352 struct nv_skb_map *tmp_tx_ctx = NULL;
2353 unsigned long flags;
2355 /* add fragments to entries count */
2356 for (i = 0; i < fragments; i++) {
2357 u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2359 entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2360 ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2363 spin_lock_irqsave(&np->lock, flags);
2364 empty_slots = nv_get_empty_tx_slots(np);
2365 if (unlikely(empty_slots <= entries)) {
2366 netif_stop_queue(dev);
2368 spin_unlock_irqrestore(&np->lock, flags);
2369 return NETDEV_TX_BUSY;
2371 spin_unlock_irqrestore(&np->lock, flags);
2373 start_tx = put_tx = np->put_tx.ex;
2374 start_tx_ctx = np->put_tx_ctx;
2376 /* setup the header buffer */
2378 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2379 np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
2380 skb->data + offset, bcnt,
2382 if (unlikely(dma_mapping_error(&np->pci_dev->dev,
2383 np->put_tx_ctx->dma))) {
2384 /* on DMA mapping error - drop the packet */
2385 dev_kfree_skb_any(skb);
2386 u64_stats_update_begin(&np->swstats_tx_syncp);
2387 np->stat_tx_dropped++;
2388 u64_stats_update_end(&np->swstats_tx_syncp);
2389 return NETDEV_TX_OK;
2391 np->put_tx_ctx->dma_len = bcnt;
2392 np->put_tx_ctx->dma_single = 1;
2393 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2394 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2395 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2397 tx_flags = NV_TX2_VALID;
2400 if (unlikely(put_tx++ == np->last_tx.ex))
2401 put_tx = np->tx_ring.ex;
2402 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2403 np->put_tx_ctx = np->tx_skb;
2406 /* setup the fragments */
2407 for (i = 0; i < fragments; i++) {
2408 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2409 u32 frag_size = skb_frag_size(frag);
2413 bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2415 start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
2416 np->put_tx_ctx->dma = skb_frag_dma_map(
2422 if (unlikely(dma_mapping_error(&np->pci_dev->dev,
2423 np->put_tx_ctx->dma))) {
2425 /* Unwind the mapped fragments */
2427 nv_unmap_txskb(np, start_tx_ctx);
2428 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
2429 tmp_tx_ctx = np->tx_skb;
2430 } while (tmp_tx_ctx != np->put_tx_ctx);
2431 dev_kfree_skb_any(skb);
2432 np->put_tx_ctx = start_tx_ctx;
2433 u64_stats_update_begin(&np->swstats_tx_syncp);
2434 np->stat_tx_dropped++;
2435 u64_stats_update_end(&np->swstats_tx_syncp);
2436 return NETDEV_TX_OK;
2438 np->put_tx_ctx->dma_len = bcnt;
2439 np->put_tx_ctx->dma_single = 0;
2440 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2441 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2442 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2446 if (unlikely(put_tx++ == np->last_tx.ex))
2447 put_tx = np->tx_ring.ex;
2448 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2449 np->put_tx_ctx = np->tx_skb;
2450 } while (frag_size);
2453 if (unlikely(put_tx == np->tx_ring.ex))
2454 prev_tx = np->last_tx.ex;
2456 prev_tx = put_tx - 1;
2458 if (unlikely(np->put_tx_ctx == np->tx_skb))
2459 prev_tx_ctx = np->last_tx_ctx;
2461 prev_tx_ctx = np->put_tx_ctx - 1;
2463 /* set last fragment flag */
2464 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
2466 /* save skb in this slot's context area */
2467 prev_tx_ctx->skb = skb;
2469 if (skb_is_gso(skb))
2470 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2472 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2473 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2476 if (skb_vlan_tag_present(skb))
2477 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
2478 skb_vlan_tag_get(skb));
2480 start_tx->txvlan = 0;
2482 spin_lock_irqsave(&np->lock, flags);
2485 /* Limit the number of outstanding tx. Setup all fragments, but
2486 * do not set the VALID bit on the first descriptor. Save a pointer
2487 * to that descriptor and also to the next skb_map element. */
2490 if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
2491 if (!np->tx_change_owner)
2492 np->tx_change_owner = start_tx_ctx;
2494 /* remove VALID bit */
2495 tx_flags &= ~NV_TX2_VALID;
2496 start_tx_ctx->first_tx_desc = start_tx;
2497 start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
2498 np->tx_end_flip = np->put_tx_ctx;
2500 np->tx_pkts_in_progress++;
2505 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2507 netdev_sent_queue(np->dev, skb->len);
2509 skb_tx_timestamp(skb);
2511 np->put_tx.ex = put_tx;
2513 spin_unlock_irqrestore(&np->lock, flags);
2515 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2516 return NETDEV_TX_OK;
2519 static inline void nv_tx_flip_ownership(struct net_device *dev)
2521 struct fe_priv *np = netdev_priv(dev);
2523 np->tx_pkts_in_progress--;
2524 if (np->tx_change_owner) {
2525 np->tx_change_owner->first_tx_desc->flaglen |=
2526 cpu_to_le32(NV_TX2_VALID);
2527 np->tx_pkts_in_progress++;
2529 np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
2530 if (np->tx_change_owner == np->tx_end_flip)
2531 np->tx_change_owner = NULL;
2533 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
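/* Sketch of the hand-off above: each completed limited packet
 * releases one deferred packet by setting NV_TX2_VALID on its saved
 * first descriptor, making the whole chain visible to the NIC at
 * once; the kick write asks the hardware to rescan the ring.
 * tx_change_owner walks the list built in nv_start_xmit_optimized
 * until it reaches tx_end_flip.
 */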
2538 * nv_tx_done: check for completed packets, release the skbs.
2540 * Caller must own np->lock.
2542 static int nv_tx_done(struct net_device *dev, int limit)
2544 struct fe_priv *np = netdev_priv(dev);
2547 struct ring_desc *orig_get_tx = np->get_tx.orig;
2548 unsigned int bytes_compl = 0;
2550 while ((np->get_tx.orig != np->put_tx.orig) &&
2551 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
2552 (tx_work < limit)) {
2554 nv_unmap_txskb(np, np->get_tx_ctx);
2556 if (np->desc_ver == DESC_VER_1) {
2557 if (flags & NV_TX_LASTPACKET) {
2558 if (unlikely(flags & NV_TX_ERROR)) {
2559 if ((flags & NV_TX_RETRYERROR)
2560 && !(flags & NV_TX_RETRYCOUNT_MASK))
2561 nv_legacybackoff_reseed(dev);
2563 u64_stats_update_begin(&np->swstats_tx_syncp);
2564 np->stat_tx_packets++;
2565 np->stat_tx_bytes += np->get_tx_ctx->skb->len;
2566 u64_stats_update_end(&np->swstats_tx_syncp);
2568 bytes_compl += np->get_tx_ctx->skb->len;
2569 dev_kfree_skb_any(np->get_tx_ctx->skb);
2570 np->get_tx_ctx->skb = NULL;
2574 if (flags & NV_TX2_LASTPACKET) {
2575 if (unlikely(flags & NV_TX2_ERROR)) {
2576 if ((flags & NV_TX2_RETRYERROR)
2577 && !(flags & NV_TX2_RETRYCOUNT_MASK))
2578 nv_legacybackoff_reseed(dev);
2580 u64_stats_update_begin(&np->swstats_tx_syncp);
2581 np->stat_tx_packets++;
2582 np->stat_tx_bytes += np->get_tx_ctx->skb->len;
2583 u64_stats_update_end(&np->swstats_tx_syncp);
2585 bytes_compl += np->get_tx_ctx->skb->len;
2586 dev_kfree_skb_any(np->get_tx_ctx->skb);
2587 np->get_tx_ctx->skb = NULL;
2591 if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
2592 np->get_tx.orig = np->tx_ring.orig;
2593 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2594 np->get_tx_ctx = np->tx_skb;
2597 netdev_completed_queue(np->dev, tx_work, bytes_compl);
2599 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
2601 netif_wake_queue(dev);
2606 static int nv_tx_done_optimized(struct net_device *dev, int limit)
2608 struct fe_priv *np = netdev_priv(dev);
2611 struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
2612 unsigned long bytes_cleaned = 0;
2614 while ((np->get_tx.ex != np->put_tx.ex) &&
2615 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
2616 (tx_work < limit)) {
2618 nv_unmap_txskb(np, np->get_tx_ctx);
2620 if (flags & NV_TX2_LASTPACKET) {
2621 if (unlikely(flags & NV_TX2_ERROR)) {
2622 if ((flags & NV_TX2_RETRYERROR)
2623 && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
2624 if (np->driver_data & DEV_HAS_GEAR_MODE)
2625 nv_gear_backoff_reseed(dev);
2627 nv_legacybackoff_reseed(dev);
2630 u64_stats_update_begin(&np->swstats_tx_syncp);
2631 np->stat_tx_packets++;
2632 np->stat_tx_bytes += np->get_tx_ctx->skb->len;
2633 u64_stats_update_end(&np->swstats_tx_syncp);
2636 bytes_cleaned += np->get_tx_ctx->skb->len;
2637 dev_kfree_skb_any(np->get_tx_ctx->skb);
2638 np->get_tx_ctx->skb = NULL;
2642 nv_tx_flip_ownership(dev);
2645 if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2646 np->get_tx.ex = np->tx_ring.ex;
2647 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2648 np->get_tx_ctx = np->tx_skb;
2651 netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
2653 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
2655 netif_wake_queue(dev);
2661 * nv_tx_timeout: dev->tx_timeout function
2662 * Called with netif_tx_lock held.
2664 static void nv_tx_timeout(struct net_device *dev)
2666 struct fe_priv *np = netdev_priv(dev);
2667 u8 __iomem *base = get_hwbase(dev);
2669 union ring_type put_tx;
2672 if (np->msi_flags & NV_MSI_X_ENABLED)
2673 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2675 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2677 netdev_warn(dev, "Got tx_timeout. irq status: %08x\n", status);
2679 if (unlikely(debug_tx_timeout)) {
2682 netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
2683 netdev_info(dev, "Dumping tx registers\n");
2684 for (i = 0; i <= np->register_size; i += 32) {
2686 "%3x: %08x %08x %08x %08x "
2687 "%08x %08x %08x %08x\n",
2689 readl(base + i + 0), readl(base + i + 4),
2690 readl(base + i + 8), readl(base + i + 12),
2691 readl(base + i + 16), readl(base + i + 20),
2692 readl(base + i + 24), readl(base + i + 28));
2694 netdev_info(dev, "Dumping tx ring\n");
2695 for (i = 0; i < np->tx_ring_size; i += 4) {
2696 if (!nv_optimized(np)) {
2698 "%03x: %08x %08x // %08x %08x "
2699 "// %08x %08x // %08x %08x\n",
2701 le32_to_cpu(np->tx_ring.orig[i].buf),
2702 le32_to_cpu(np->tx_ring.orig[i].flaglen),
2703 le32_to_cpu(np->tx_ring.orig[i+1].buf),
2704 le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2705 le32_to_cpu(np->tx_ring.orig[i+2].buf),
2706 le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2707 le32_to_cpu(np->tx_ring.orig[i+3].buf),
2708 le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2711 "%03x: %08x %08x %08x "
2712 "// %08x %08x %08x "
2713 "// %08x %08x %08x "
2714 "// %08x %08x %08x\n",
2716 le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2717 le32_to_cpu(np->tx_ring.ex[i].buflow),
2718 le32_to_cpu(np->tx_ring.ex[i].flaglen),
2719 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2720 le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2721 le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2722 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2723 le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2724 le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2725 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2726 le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2727 le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2732 spin_lock_irq(&np->lock);
2734 /* 1) stop tx engine */
2737 /* 2) complete any outstanding tx and do not give HW any limited tx pkts */
2738 saved_tx_limit = np->tx_limit;
2739 np->tx_limit = 0; /* prevent giving HW any limited pkts */
2740 np->tx_stop = 0; /* prevent waking tx queue */
2741 if (!nv_optimized(np))
2742 nv_tx_done(dev, np->tx_ring_size);
2744 nv_tx_done_optimized(dev, np->tx_ring_size);
2746 /* save current HW position */
2747 if (np->tx_change_owner)
2748 put_tx.ex = np->tx_change_owner->first_tx_desc;
2750 put_tx = np->put_tx;
2752 /* 3) clear all tx state */
2756 /* 4) restore state to current HW position */
2757 np->get_tx = np->put_tx = put_tx;
2758 np->tx_limit = saved_tx_limit;
2760 /* 5) restart tx engine */
2762 netif_wake_queue(dev);
2763 spin_unlock_irq(&np->lock);
2767 * Called when the nic notices a mismatch between the actual data len on the
2768 * wire and the len indicated in the 802 header
2770 static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2772 int hdrlen; /* length of the 802 header */
2773 int protolen; /* length as stored in the proto field */
2775 /* 1) calculate len according to header */
2776 if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2777 protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
2780 protolen = ntohs(((struct ethhdr *)packet)->h_proto);
2783 if (protolen > ETH_DATA_LEN)
2784 return datalen; /* Value in proto field not a len, no checks possible */
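/* Worked example (the "protolen += hdrlen" step is elided above): a
 * non-VLAN frame whose length field reads 100 yields protolen =
 * 100 + ETH_HLEN = 114. If 120 bytes arrived on the wire the surplus
 * is trimmed and 114 is returned; if only 80 bytes arrived, less data
 * came in than the header promised and the packet is discarded.
 */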
2787 /* consistency checks: */
2788 if (datalen > ETH_ZLEN) {
2789 if (datalen >= protolen) {
2790 /* more data on wire than in 802 header, trim off the extra data. */
2795 /* less data on wire than mentioned in header.
2796 * Discard the packet. */
2801 /* short packet. Accept only if 802 values are also short */
2802 if (protolen > ETH_ZLEN) {
2809 static int nv_rx_process(struct net_device *dev, int limit)
2811 struct fe_priv *np = netdev_priv(dev);
2814 struct sk_buff *skb;
2817 while ((np->get_rx.orig != np->put_rx.orig) &&
2818 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2819 (rx_work < limit)) {
2822 * the packet is for us - immediately tear down the pci mapping.
2823 * TODO: check if a prefetch of the first cacheline improves performance. */
2826 dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma,
2827 np->get_rx_ctx->dma_len,
2829 skb = np->get_rx_ctx->skb;
2830 np->get_rx_ctx->skb = NULL;
2832 /* look at what we actually got: */
2833 if (np->desc_ver == DESC_VER_1) {
2834 if (likely(flags & NV_RX_DESCRIPTORVALID)) {
2835 len = flags & LEN_MASK_V1;
2836 if (unlikely(flags & NV_RX_ERROR)) {
2837 if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
2838 len = nv_getlen(dev, skb->data, len);
2844 /* framing errors are soft errors */
2845 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2846 if (flags & NV_RX_SUBTRACT1)
2849 /* the rest are hard errors */
2851 if (flags & NV_RX_MISSEDFRAME) {
2852 u64_stats_update_begin(&np->swstats_rx_syncp);
2853 np->stat_rx_missed_errors++;
2854 u64_stats_update_end(&np->swstats_rx_syncp);
2865 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2866 len = flags & LEN_MASK_V2;
2867 if (unlikely(flags & NV_RX2_ERROR)) {
2868 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2869 len = nv_getlen(dev, skb->data, len);
2875 /* framing errors are soft errors */
2876 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2877 if (flags & NV_RX2_SUBTRACT1)
2880 /* the rest are hard errors */
2886 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2887 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
2888 skb->ip_summed = CHECKSUM_UNNECESSARY;
2894 /* got a valid packet - forward it to the network core */
2896 skb->protocol = eth_type_trans(skb, dev);
2897 napi_gro_receive(&np->napi, skb);
2898 u64_stats_update_begin(&np->swstats_rx_syncp);
2899 np->stat_rx_packets++;
2900 np->stat_rx_bytes += len;
2901 u64_stats_update_end(&np->swstats_rx_syncp);
2903 if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2904 np->get_rx.orig = np->rx_ring.orig;
2905 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2906 np->get_rx_ctx = np->rx_skb;
2914 static int nv_rx_process_optimized(struct net_device *dev, int limit)
2916 struct fe_priv *np = netdev_priv(dev);
2920 struct sk_buff *skb;
2923 while ((np->get_rx.ex != np->put_rx.ex) &&
2924 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
2925 (rx_work < limit)) {
2928 * the packet is for us - immediately tear down the pci mapping.
2929 * TODO: check if a prefetch of the first cacheline improves performance. */
2932 dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma,
2933 np->get_rx_ctx->dma_len,
2935 skb = np->get_rx_ctx->skb;
2936 np->get_rx_ctx->skb = NULL;
2938 /* look at what we actually got: */
2939 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2940 len = flags & LEN_MASK_V2;
2941 if (unlikely(flags & NV_RX2_ERROR)) {
2942 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2943 len = nv_getlen(dev, skb->data, len);
2949 /* framing errors are soft errors */
2950 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2951 if (flags & NV_RX2_SUBTRACT1)
2954 /* the rest are hard errors */
2961 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2962 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
2963 skb->ip_summed = CHECKSUM_UNNECESSARY;
2965 /* got a valid packet - forward it to the network core */
2967 skb->protocol = eth_type_trans(skb, dev);
2968 prefetch(skb->data);
2970 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2973 * We need to check for NETIF_F_HW_VLAN_CTAG_RX
2974 * here. Even if vlan rx accel is disabled,
2975 * NV_RX3_VLAN_TAG_PRESENT is pseudo-randomly set. */
2977 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2978 vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2979 u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
2981 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
2983 napi_gro_receive(&np->napi, skb);
2984 u64_stats_update_begin(&np->swstats_rx_syncp);
2985 np->stat_rx_packets++;
2986 np->stat_rx_bytes += len;
2987 u64_stats_update_end(&np->swstats_rx_syncp);
2992 if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
2993 np->get_rx.ex = np->rx_ring.ex;
2994 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2995 np->get_rx_ctx = np->rx_skb;
3003 static void set_bufsize(struct net_device *dev)
3005 struct fe_priv *np = netdev_priv(dev);
3007 if (dev->mtu <= ETH_DATA_LEN)
3008 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
3010 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
3014 * nv_change_mtu: dev->change_mtu function
3015 * Called with dev_base_lock held for read.
3017 static int nv_change_mtu(struct net_device *dev, int new_mtu)
3019 struct fe_priv *np = netdev_priv(dev);
3025 /* return early if the buffer sizes will not change */
3026 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
3029 /* synchronized against open : rtnl_lock() held by caller */
3030 if (netif_running(dev)) {
3031 u8 __iomem *base = get_hwbase(dev);
3033 * It seems that the nic preloads valid ring entries into an
3034 * internal buffer. The procedure for flushing everything is
3035 * guessed; there is probably a simpler approach.
3036 * Changing the MTU is a rare event, so it shouldn't matter. */
3038 nv_disable_irq(dev);
3039 nv_napi_disable(dev);
3040 netif_tx_lock_bh(dev);
3041 netif_addr_lock(dev);
3042 spin_lock(&np->lock);
3046 /* drain rx queue */
3048 /* reinit driver view of the rx queue */
3050 if (nv_init_ring(dev)) {
3051 if (!np->in_shutdown)
3052 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3054 /* reinit nic view of the rx queue */
3055 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3056 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3057 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3058 base + NvRegRingSizes);
3060 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3063 /* restart rx engine */
3065 spin_unlock(&np->lock);
3066 netif_addr_unlock(dev);
3067 netif_tx_unlock_bh(dev);
3068 nv_napi_enable(dev);
3074 static void nv_copy_mac_to_hw(struct net_device *dev)
3076 u8 __iomem *base = get_hwbase(dev);
3079 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
3080 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
3081 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
3083 writel(mac[0], base + NvRegMacAddrA);
3084 writel(mac[1], base + NvRegMacAddrB);
3088 * nv_set_mac_address: dev->set_mac_address function
3089 * Called with rtnl_lock() held.
3091 static int nv_set_mac_address(struct net_device *dev, void *addr)
3093 struct fe_priv *np = netdev_priv(dev);
3094 struct sockaddr *macaddr = (struct sockaddr *)addr;
3096 if (!is_valid_ether_addr(macaddr->sa_data))
3097 return -EADDRNOTAVAIL;
3099 /* synchronized against open : rtnl_lock() held by caller */
3100 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
3102 if (netif_running(dev)) {
3103 netif_tx_lock_bh(dev);
3104 netif_addr_lock(dev);
3105 spin_lock_irq(&np->lock);
3107 /* stop rx engine */
3110 /* set mac address */
3111 nv_copy_mac_to_hw(dev);
3113 /* restart rx engine */
3115 spin_unlock_irq(&np->lock);
3116 netif_addr_unlock(dev);
3117 netif_tx_unlock_bh(dev);
3119 nv_copy_mac_to_hw(dev);
3125 * nv_set_multicast: dev->set_multicast function
3126 * Called with netif_tx_lock held.
3128 static void nv_set_multicast(struct net_device *dev)
3130 struct fe_priv *np = netdev_priv(dev);
3131 u8 __iomem *base = get_hwbase(dev);
3134 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
3136 memset(addr, 0, sizeof(addr));
3137 memset(mask, 0, sizeof(mask));
3139 if (dev->flags & IFF_PROMISC) {
3140 pff |= NVREG_PFF_PROMISC;
3142 pff |= NVREG_PFF_MYADDR;
3144 if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
3148 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
3149 if (dev->flags & IFF_ALLMULTI) {
3150 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
3152 struct netdev_hw_addr *ha;
3154 netdev_for_each_mc_addr(ha, dev) {
3155 unsigned char *hw_addr = ha->addr;
3158 a = le32_to_cpu(*(__le32 *) hw_addr);
3159 b = le16_to_cpu(*(__le16 *) (&hw_addr[4]));
3166 addr[0] = alwaysOn[0];
3167 addr[1] = alwaysOn[1];
3168 mask[0] = alwaysOn[0] | alwaysOff[0];
3169 mask[1] = alwaysOn[1] | alwaysOff[1];
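/* Filter sketch (the per-address accumulation loop is partly elided):
 * alwaysOn collects bits set in every listed multicast address,
 * alwaysOff bits clear in every address. Only bits constant across
 * the whole list, i.e. set in alwaysOn | alwaysOff, are compared by
 * the hardware; addr supplies their expected values and all other
 * bits are wildcarded.
 */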
3171 mask[0] = NVREG_MCASTMASKA_NONE;
3172 mask[1] = NVREG_MCASTMASKB_NONE;
3175 addr[0] |= NVREG_MCASTADDRA_FORCE;
3176 pff |= NVREG_PFF_ALWAYS;
3177 spin_lock_irq(&np->lock);
3179 writel(addr[0], base + NvRegMulticastAddrA);
3180 writel(addr[1], base + NvRegMulticastAddrB);
3181 writel(mask[0], base + NvRegMulticastMaskA);
3182 writel(mask[1], base + NvRegMulticastMaskB);
3183 writel(pff, base + NvRegPacketFilterFlags);
3185 spin_unlock_irq(&np->lock);
3188 static void nv_update_pause(struct net_device *dev, u32 pause_flags)
3190 struct fe_priv *np = netdev_priv(dev);
3191 u8 __iomem *base = get_hwbase(dev);
3193 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
3195 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
3196 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
3197 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
3198 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
3199 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3201 writel(pff, base + NvRegPacketFilterFlags);
3204 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
3205 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
3206 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
3207 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
3208 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
3209 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
3210 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
3211 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
3212 /* limit the number of tx pause frames to a default of 8 */
3213 writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
3215 writel(pause_enable, base + NvRegTxPauseFrame);
3216 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
3217 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3219 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
3220 writel(regmisc, base + NvRegMisc1);
3225 static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
3227 struct fe_priv *np = netdev_priv(dev);
3228 u8 __iomem *base = get_hwbase(dev);
3232 np->linkspeed = NVREG_LINKSPEED_FORCE|speed;
3233 np->duplex = duplex;
3235 /* see if gigabit phy */
3236 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3237 if (mii_status & PHY_GIGABIT) {
3238 np->gigabit = PHY_GIGABIT;
3239 phyreg = readl(base + NvRegSlotTime);
3240 phyreg &= ~(0x3FF00);
3241 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
3242 phyreg |= NVREG_SLOTTIME_10_100_FULL;
3243 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
3244 phyreg |= NVREG_SLOTTIME_10_100_FULL;
3245 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3246 phyreg |= NVREG_SLOTTIME_1000_FULL;
3247 writel(phyreg, base + NvRegSlotTime);
3250 phyreg = readl(base + NvRegPhyInterface);
3251 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3252 if (np->duplex == 0)
3254 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3256 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3257 NVREG_LINKSPEED_1000)
3259 writel(phyreg, base + NvRegPhyInterface);
3261 if (phyreg & PHY_RGMII) {
3262 if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3263 NVREG_LINKSPEED_1000)
3264 txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3266 txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3268 txreg = NVREG_TX_DEFERRAL_DEFAULT;
3270 writel(txreg, base + NvRegTxDeferral);
3272 if (np->desc_ver == DESC_VER_1) {
3273 txreg = NVREG_TX_WM_DESC1_DEFAULT;
3275 if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3276 NVREG_LINKSPEED_1000)
3277 txreg = NVREG_TX_WM_DESC2_3_1000;
3279 txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3281 writel(txreg, base + NvRegTxWatermark);
3283 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3286 writel(np->linkspeed, base + NvRegLinkSpeed);
3291 * nv_update_linkspeed - Setup the MAC according to the link partner
3292 * @dev: Network device to be configured
3294 * The function queries the PHY and checks if there is a link partner.
3295 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
3296 * set to 10 MBit HD.
3298 * The function returns 0 if there is no link partner and 1 if there is
3299 * a good link partner.
3301 static int nv_update_linkspeed(struct net_device *dev)
3303 struct fe_priv *np = netdev_priv(dev);
3304 u8 __iomem *base = get_hwbase(dev);
3307 int adv_lpa, adv_pause, lpa_pause;
3308 int newls = np->linkspeed;
3309 int newdup = np->duplex;
3313 u32 control_1000, status_1000, phyreg, pause_flags, txreg;
3317 /* If device loopback is enabled, set carrier on and enable max link speed. */
3320 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3321 if (bmcr & BMCR_LOOPBACK) {
3322 if (netif_running(dev)) {
3323 nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1);
3324 if (!netif_carrier_ok(dev))
3325 netif_carrier_on(dev);
3330 /* BMSR_LSTATUS is latched, read it twice:
3331 * we want the current value.
3333 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3334 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3336 if (!(mii_status & BMSR_LSTATUS)) {
3337 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3343 if (np->autoneg == 0) {
3344 if (np->fixed_mode & LPA_100FULL) {
3345 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3347 } else if (np->fixed_mode & LPA_100HALF) {
3348 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3350 } else if (np->fixed_mode & LPA_10FULL) {
3351 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3354 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3360 /* check that autonegotiation is complete */
3361 if (!(mii_status & BMSR_ANEGCOMPLETE)) {
3362 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
3363 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3369 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3370 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3373 if (np->gigabit == PHY_GIGABIT) {
3374 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3375 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
3377 if ((control_1000 & ADVERTISE_1000FULL) &&
3378 (status_1000 & LPA_1000FULL)) {
3379 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
3385 /* FIXME: handle parallel detection properly */
3386 adv_lpa = lpa & adv;
3387 if (adv_lpa & LPA_100FULL) {
3388 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3390 } else if (adv_lpa & LPA_100HALF) {
3391 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3393 } else if (adv_lpa & LPA_10FULL) {
3394 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3396 } else if (adv_lpa & LPA_10HALF) {
3397 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3400 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3405 if (np->duplex == newdup && np->linkspeed == newls)
3408 np->duplex = newdup;
3409 np->linkspeed = newls;
3411 /* The transmitter and receiver must be restarted for safe update */
3412 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
3413 txrxFlags |= NV_RESTART_TX;
3416 if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
3417 txrxFlags |= NV_RESTART_RX;
3421 if (np->gigabit == PHY_GIGABIT) {
3422 phyreg = readl(base + NvRegSlotTime);
3423 phyreg &= ~(0x3FF00);
3424 if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
3425 ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
3426 phyreg |= NVREG_SLOTTIME_10_100_FULL;
3427 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3428 phyreg |= NVREG_SLOTTIME_1000_FULL;
3429 writel(phyreg, base + NvRegSlotTime);
3432 phyreg = readl(base + NvRegPhyInterface);
3433 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3434 if (np->duplex == 0)
3436 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3438 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3440 writel(phyreg, base + NvRegPhyInterface);
3442 phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
3443 if (phyreg & PHY_RGMII) {
3444 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
3445 txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3447 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
3448 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
3449 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
3451 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
3453 txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3457 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
3458 txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
3460 txreg = NVREG_TX_DEFERRAL_DEFAULT;
3462 writel(txreg, base + NvRegTxDeferral);
3464 if (np->desc_ver == DESC_VER_1) {
3465 txreg = NVREG_TX_WM_DESC1_DEFAULT;
3467 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3468 txreg = NVREG_TX_WM_DESC2_3_1000;
3470 txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3472 writel(txreg, base + NvRegTxWatermark);
3474 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3477 writel(np->linkspeed, base + NvRegLinkSpeed);
3481 /* setup pause frame */
3482 if (netif_running(dev) && (np->duplex != 0)) {
3483 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3484 adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3485 lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
3487 switch (adv_pause) {
3488 case ADVERTISE_PAUSE_CAP:
3489 if (lpa_pause & LPA_PAUSE_CAP) {
3490 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3491 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3492 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3495 case ADVERTISE_PAUSE_ASYM:
3496 if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
3497 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3499 case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
3500 if (lpa_pause & LPA_PAUSE_CAP) {
3501 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3502 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3503 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3505 if (lpa_pause == LPA_PAUSE_ASYM)
3506 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3510 pause_flags = np->pause_flags;
3513 nv_update_pause(dev, pause_flags);
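/* The switch above follows the usual 802.3 Annex 28B pause
 * resolution: symmetric pause when both sides advertise PAUSE_CAP,
 * asymmetric pause when the ASYM bits complement each other. TX pause
 * is additionally gated on NV_PAUSEFRAME_TX_REQ so the user's ethtool
 * setting is honoured.
 */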
3515 if (txrxFlags & NV_RESTART_TX)
3517 if (txrxFlags & NV_RESTART_RX)
3523 static void nv_linkchange(struct net_device *dev)
3525 if (nv_update_linkspeed(dev)) {
3526 if (!netif_carrier_ok(dev)) {
3527 netif_carrier_on(dev);
3528 netdev_info(dev, "link up\n");
3529 nv_txrx_gate(dev, false);
3533 if (netif_carrier_ok(dev)) {
3534 netif_carrier_off(dev);
3535 netdev_info(dev, "link down\n");
3536 nv_txrx_gate(dev, true);
3542 static void nv_link_irq(struct net_device *dev)
3544 u8 __iomem *base = get_hwbase(dev);
3547 miistat = readl(base + NvRegMIIStatus);
3548 writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
3550 if (miistat & (NVREG_MIISTAT_LINKCHANGE))
3554 static void nv_msi_workaround(struct fe_priv *np)
3557 /* Need to toggle the msi irq mask within the ethernet device,
3558 * otherwise future interrupts will not be detected. */
3560 if (np->msi_flags & NV_MSI_ENABLED) {
3561 u8 __iomem *base = np->base;
3563 writel(0, base + NvRegMSIIrqMask);
3564 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3568 static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
3570 struct fe_priv *np = netdev_priv(dev);
3572 if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) {
3573 if (total_work > NV_DYNAMIC_THRESHOLD) {
3574 /* transition to poll based interrupts */
3575 np->quiet_count = 0;
3576 if (np->irqmask != NVREG_IRQMASK_CPU) {
3577 np->irqmask = NVREG_IRQMASK_CPU;
3581 if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
3584 /* reached a period of low activity, switch
3585 * to per tx/rx packet interrupts */
3586 if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
3587 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
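/* Dynamic moderation sketch: a poll that completes more than
 * NV_DYNAMIC_THRESHOLD descriptors flips the NIC to the timer-driven
 * CPU mask, capping the interrupt rate under load; only after
 * NV_DYNAMIC_MAX_QUIET_COUNT consecutive quiet polls does it return
 * to per-packet (throughput) interrupts for low latency.
 */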
3596 static irqreturn_t nv_nic_irq(int foo, void *data)
3598 struct net_device *dev = (struct net_device *) data;
3599 struct fe_priv *np = netdev_priv(dev);
3600 u8 __iomem *base = get_hwbase(dev);
3602 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3603 np->events = readl(base + NvRegIrqStatus);
3604 writel(np->events, base + NvRegIrqStatus);
3606 np->events = readl(base + NvRegMSIXIrqStatus);
3607 writel(np->events, base + NvRegMSIXIrqStatus);
3609 if (!(np->events & np->irqmask))
3612 nv_msi_workaround(np);
3614 if (napi_schedule_prep(&np->napi)) {
3616 * Disable further irqs (msix not enabled with napi)
3618 writel(0, base + NvRegIrqMask);
3619 __napi_schedule(&np->napi);
3625 /* All _optimized functions are used to help increase performance
3626 * (reduce CPU usage and increase throughput). They use descriptor version 3,
3627 * compiler directives, and reduce memory accesses.
3629 static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3631 struct net_device *dev = (struct net_device *) data;
3632 struct fe_priv *np = netdev_priv(dev);
3633 u8 __iomem *base = get_hwbase(dev);
3635 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3636 np->events = readl(base + NvRegIrqStatus);
3637 writel(np->events, base + NvRegIrqStatus);
3639 np->events = readl(base + NvRegMSIXIrqStatus);
3640 writel(np->events, base + NvRegMSIXIrqStatus);
3642 if (!(np->events & np->irqmask))
3645 nv_msi_workaround(np);
3647 if (napi_schedule_prep(&np->napi)) {
3649 * Disable further irqs (msix not enabled with napi)
3651 writel(0, base + NvRegIrqMask);
3652 __napi_schedule(&np->napi);
3658 static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3660 struct net_device *dev = (struct net_device *) data;
3661 struct fe_priv *np = netdev_priv(dev);
3662 u8 __iomem *base = get_hwbase(dev);
3665 unsigned long flags;
3668 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3669 writel(events, base + NvRegMSIXIrqStatus);
3670 netdev_dbg(dev, "tx irq events: %08x\n", events);
3671 if (!(events & np->irqmask))
3674 spin_lock_irqsave(&np->lock, flags);
3675 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3676 spin_unlock_irqrestore(&np->lock, flags);
3678 if (unlikely(i > max_interrupt_work)) {
3679 spin_lock_irqsave(&np->lock, flags);
3680 /* disable interrupts on the nic */
3681 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3684 if (!np->in_shutdown) {
3685 np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3686 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3688 spin_unlock_irqrestore(&np->lock, flags);
3689 netdev_dbg(dev, "%s: too many iterations (%d)\n",
3696 return IRQ_RETVAL(i);
3699 static int nv_napi_poll(struct napi_struct *napi, int budget)
3701 struct fe_priv *np = container_of(napi, struct fe_priv, napi);
3702 struct net_device *dev = np->dev;
3703 u8 __iomem *base = get_hwbase(dev);
3704 unsigned long flags;
3706 int rx_count, tx_work = 0, rx_work = 0;
3709 if (!nv_optimized(np)) {
3710 spin_lock_irqsave(&np->lock, flags);
3711 tx_work += nv_tx_done(dev, np->tx_ring_size);
3712 spin_unlock_irqrestore(&np->lock, flags);
3714 rx_count = nv_rx_process(dev, budget - rx_work);
3715 retcode = nv_alloc_rx(dev);
3717 spin_lock_irqsave(&np->lock, flags);
3718 tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
3719 spin_unlock_irqrestore(&np->lock, flags);
3721 rx_count = nv_rx_process_optimized(dev,
3723 retcode = nv_alloc_rx_optimized(dev);
3725 } while (retcode == 0 &&
3726 rx_count > 0 && (rx_work += rx_count) < budget);
3729 spin_lock_irqsave(&np->lock, flags);
3730 if (!np->in_shutdown)
3731 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3732 spin_unlock_irqrestore(&np->lock, flags);
3735 nv_change_interrupt_mode(dev, tx_work + rx_work);
3737 if (unlikely(np->events & NVREG_IRQ_LINK)) {
3738 spin_lock_irqsave(&np->lock, flags);
3740 spin_unlock_irqrestore(&np->lock, flags);
3742 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3743 spin_lock_irqsave(&np->lock, flags);
3745 spin_unlock_irqrestore(&np->lock, flags);
3746 np->link_timeout = jiffies + LINK_TIMEOUT;
3748 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
3749 spin_lock_irqsave(&np->lock, flags);
3750 if (!np->in_shutdown) {
3751 np->nic_poll_irq = np->irqmask;
3752 np->recover_error = 1;
3753 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3755 spin_unlock_irqrestore(&np->lock, flags);
3756 napi_complete(napi);
3760 if (rx_work < budget) {
3761 /* re-enable interrupts
3762 (msix not enabled in napi) */
3763 napi_complete_done(napi, rx_work);
3765 writel(np->irqmask, base + NvRegIrqMask);
3770 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3772 struct net_device *dev = (struct net_device *) data;
3773 struct fe_priv *np = netdev_priv(dev);
3774 u8 __iomem *base = get_hwbase(dev);
3777 unsigned long flags;
3780 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3781 writel(events, base + NvRegMSIXIrqStatus);
3782 netdev_dbg(dev, "rx irq events: %08x\n", events);
3783 if (!(events & np->irqmask))
3786 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3787 if (unlikely(nv_alloc_rx_optimized(dev))) {
3788 spin_lock_irqsave(&np->lock, flags);
3789 if (!np->in_shutdown)
3790 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3791 spin_unlock_irqrestore(&np->lock, flags);
3795 if (unlikely(i > max_interrupt_work)) {
3796 spin_lock_irqsave(&np->lock, flags);
3797 /* disable interrupts on the nic */
3798 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3801 if (!np->in_shutdown) {
3802 np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3803 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3805 spin_unlock_irqrestore(&np->lock, flags);
3806 netdev_dbg(dev, "%s: too many iterations (%d)\n",
3812 return IRQ_RETVAL(i);
3815 static irqreturn_t nv_nic_irq_other(int foo, void *data)
3817 struct net_device *dev = (struct net_device *) data;
3818 struct fe_priv *np = netdev_priv(dev);
3819 u8 __iomem *base = get_hwbase(dev);
3822 unsigned long flags;
3825 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3826 writel(events, base + NvRegMSIXIrqStatus);
3827 netdev_dbg(dev, "irq events: %08x\n", events);
3828 if (!(events & np->irqmask))
3831 /* check tx in case we reached max loop limit in tx isr */
3832 spin_lock_irqsave(&np->lock, flags);
3833 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3834 spin_unlock_irqrestore(&np->lock, flags);
3836 if (events & NVREG_IRQ_LINK) {
3837 spin_lock_irqsave(&np->lock, flags);
3839 spin_unlock_irqrestore(&np->lock, flags);
3841 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3842 spin_lock_irqsave(&np->lock, flags);
3844 spin_unlock_irqrestore(&np->lock, flags);
3845 np->link_timeout = jiffies + LINK_TIMEOUT;
3847 if (events & NVREG_IRQ_RECOVER_ERROR) {
3848 spin_lock_irqsave(&np->lock, flags);
3849 /* disable interrupts on the nic */
3850 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3853 if (!np->in_shutdown) {
3854 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3855 np->recover_error = 1;
3856 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3858 spin_unlock_irqrestore(&np->lock, flags);
3861 if (unlikely(i > max_interrupt_work)) {
3862 spin_lock_irqsave(&np->lock, flags);
3863 /* disable interrupts on the nic */
3864 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3867 if (!np->in_shutdown) {
3868 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3869 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3871 spin_unlock_irqrestore(&np->lock, flags);
3872 netdev_dbg(dev, "%s: too many iterations (%d)\n",
3879 return IRQ_RETVAL(i);
3882 static irqreturn_t nv_nic_irq_test(int foo, void *data)
3884 struct net_device *dev = (struct net_device *) data;
3885 struct fe_priv *np = netdev_priv(dev);
3886 u8 __iomem *base = get_hwbase(dev);
3889 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3890 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3891 writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3893 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3894 writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3897 if (!(events & NVREG_IRQ_TIMER))
3898 return IRQ_RETVAL(0);
3900 nv_msi_workaround(np);
3902 spin_lock(&np->lock);
3904 spin_unlock(&np->lock);
3906 return IRQ_RETVAL(1);
3909 static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3911 u8 __iomem *base = get_hwbase(dev);
3915 /* Each interrupt bit can be mapped to an MSI-X vector (4 bits).
3916 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
3917 * the remaining 8 interrupts. */
3919 for (i = 0; i < 8; i++) {
3920 if ((irqmask >> i) & 0x1)
3921 msixmap |= vector << (i << 2);
3923 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3926 for (i = 0; i < 8; i++) {
3927 if ((irqmask >> (i + 8)) & 0x1)
3928 msixmap |= vector << (i << 2);
3930 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
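/* Worked example: mapping vector 2 onto interrupt bit 5 computes
 * msixmap |= 2 << (5 << 2) = 2 << 20, placing the value 2 in the
 * sixth nibble of NvRegMSIXMap0. Each map register thus packs eight
 * 4-bit vector numbers, one per interrupt source.
 */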
3933 static int nv_request_irq(struct net_device *dev, int intr_test)
3935 struct fe_priv *np = get_nvpriv(dev);
3936 u8 __iomem *base = get_hwbase(dev);
3939 irqreturn_t (*handler)(int foo, void *data);
3942 handler = nv_nic_irq_test;
3944 if (nv_optimized(np))
3945 handler = nv_nic_irq_optimized;
3947 handler = nv_nic_irq;
3950 if (np->msi_flags & NV_MSI_X_CAPABLE) {
3951 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3952 np->msi_x_entry[i].entry = i;
3953 ret = pci_enable_msix_range(np->pci_dev,
3955 np->msi_flags & NV_MSI_X_VECTORS_MASK,
3956 np->msi_flags & NV_MSI_X_VECTORS_MASK);
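/* Passing the same value as minimum and maximum vector count makes
 * the allocation all-or-nothing: either every advertised MSI-X vector
 * is granted or pci_enable_msix_range() fails, in which case the code
 * falls back to plain MSI and finally to the legacy INTx request_irq
 * path below.
 */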
3958 np->msi_flags |= NV_MSI_X_ENABLED;
3959 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3960 /* Request irq for rx handling */
3961 sprintf(np->name_rx, "%s-rx", dev->name);
3962 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
3963 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev);
3966 "request_irq failed for rx %d\n",
3968 pci_disable_msix(np->pci_dev);
3969 np->msi_flags &= ~NV_MSI_X_ENABLED;
3972 /* Request irq for tx handling */
3973 sprintf(np->name_tx, "%s-tx", dev->name);
3974 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
3975 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev);
3978 "request_irq failed for tx %d\n",
3980 pci_disable_msix(np->pci_dev);
3981 np->msi_flags &= ~NV_MSI_X_ENABLED;
3984 /* Request irq for link and timer handling */
3985 sprintf(np->name_other, "%s-other", dev->name);
3986 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
3987 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev);
3990 "request_irq failed for link %d\n",
3992 pci_disable_msix(np->pci_dev);
3993 np->msi_flags &= ~NV_MSI_X_ENABLED;
3996 /* map interrupts to their respective vector */
3997 writel(0, base + NvRegMSIXMap0);
3998 writel(0, base + NvRegMSIXMap1);
3999 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
4000 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
4001 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
4003 /* Request irq for all interrupts */
4004 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector,
4005 handler, IRQF_SHARED, dev->name, dev);
4008 "request_irq failed %d\n",
4010 pci_disable_msix(np->pci_dev);
4011 np->msi_flags &= ~NV_MSI_X_ENABLED;
4015 /* map interrupts to vector 0 */
4016 writel(0, base + NvRegMSIXMap0);
4017 writel(0, base + NvRegMSIXMap1);
4019 netdev_info(dev, "MSI-X enabled\n");
4023 if (np->msi_flags & NV_MSI_CAPABLE) {
4024 ret = pci_enable_msi(np->pci_dev);
4026 np->msi_flags |= NV_MSI_ENABLED;
4027 ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev);
4029 netdev_info(dev, "request_irq failed %d\n",
4031 pci_disable_msi(np->pci_dev);
4032 np->msi_flags &= ~NV_MSI_ENABLED;
4036 /* map interrupts to vector 0 */
4037 writel(0, base + NvRegMSIMap0);
4038 writel(0, base + NvRegMSIMap1);
4039 /* enable msi vector 0 */
4040 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
4041 netdev_info(dev, "MSI enabled\n");
4046 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
4051 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
4053 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
4058 static void nv_free_irq(struct net_device *dev)
4060 struct fe_priv *np = get_nvpriv(dev);
4063 if (np->msi_flags & NV_MSI_X_ENABLED) {
4064 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
4065 free_irq(np->msi_x_entry[i].vector, dev);
4066 pci_disable_msix(np->pci_dev);
4067 np->msi_flags &= ~NV_MSI_X_ENABLED;
4069 free_irq(np->pci_dev->irq, dev);
4070 if (np->msi_flags & NV_MSI_ENABLED) {
4071 pci_disable_msi(np->pci_dev);
4072 np->msi_flags &= ~NV_MSI_ENABLED;
4077 static void nv_do_nic_poll(struct timer_list *t)
4079 struct fe_priv *np = from_timer(np, t, nic_poll);
4080 struct net_device *dev = np->dev;
4081 u8 __iomem *base = get_hwbase(dev);
4083 unsigned long flags;
4084 unsigned int irq = 0;
4087 * First disable irq(s) and then
4088 * reenable interrupts on the nic; we have to do this before calling
4089 * nv_nic_irq because that may decide to do otherwise. */
4092 if (!using_multi_irqs(dev)) {
4093 if (np->msi_flags & NV_MSI_X_ENABLED)
4094 irq = np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector;
4096 irq = np->pci_dev->irq;
4099 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4100 irq = np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector;
4101 mask |= NVREG_IRQ_RX_ALL;
4103 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4104 irq = np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector;
4105 mask |= NVREG_IRQ_TX_ALL;
4107 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4108 irq = np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector;
4109 mask |= NVREG_IRQ_OTHER;
4113 disable_irq_nosync_lockdep_irqsave(irq, &flags);
4114 synchronize_irq(irq);
4116 if (np->recover_error) {
4117 np->recover_error = 0;
4118 netdev_info(dev, "MAC in recoverable error state\n");
4119 if (netif_running(dev)) {
4120 netif_tx_lock_bh(dev);
4121 netif_addr_lock(dev);
4122 spin_lock(&np->lock);
4125 if (np->driver_data & DEV_HAS_POWER_CNTRL)
4128 /* drain rx queue */
4129 nv_drain_rxtx(dev);
4130 /* reinit driver view of the rx queue */
4131 set_bufsize(dev);
4132 if (nv_init_ring(dev)) {
4133 if (!np->in_shutdown)
4134 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4136 /* reinit nic view of the rx queue */
4137 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4138 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4139 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4140 base + NvRegRingSizes);
4142 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4144 /* clear interrupts */
4145 if (!(np->msi_flags & NV_MSI_X_ENABLED))
4146 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4147 else
4148 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4150 /* restart rx engine */
4151 nv_start_rxtx(dev);
4152 spin_unlock(&np->lock);
4153 netif_addr_unlock(dev);
4154 netif_tx_unlock_bh(dev);
4158 writel(mask, base + NvRegIrqMask);
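/* interrupts are re-armed on the nic; now run the handlers by hand for
 * whatever became pending while the vector was disabled
 */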
4161 if (!using_multi_irqs(dev)) {
4162 np->nic_poll_irq = 0;
4163 if (nv_optimized(np))
4164 nv_nic_irq_optimized(0, dev);
4165 else
4166 nv_nic_irq(0, dev);
4168 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4169 np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
4170 nv_nic_irq_rx(0, dev);
4172 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4173 np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
4174 nv_nic_irq_tx(0, dev);
4176 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4177 np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
4178 nv_nic_irq_other(0, dev);
4182 enable_irq_lockdep_irqrestore(irq, &flags);
4185 #ifdef CONFIG_NET_POLL_CONTROLLER
4186 static void nv_poll_controller(struct net_device *dev)
4188 struct fe_priv *np = netdev_priv(dev);
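/* netpoll path: drive the whole irq handling through the timer-poll
 * helper, which masks the vector and calls the handlers directly
 */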
4190 nv_do_nic_poll(&np->nic_poll);
4194 static void nv_do_stats_poll(struct timer_list *t)
4195 __acquires(&netdev_priv(dev)->hwstats_lock)
4196 __releases(&netdev_priv(dev)->hwstats_lock)
4198 struct fe_priv *np = from_timer(np, t, stats_poll);
4199 struct net_device *dev = np->dev;
4201 /* If the lock is currently taken, the stats are being refreshed
4202 * and are hence fresh enough */
4203 if (spin_trylock(&np->hwstats_lock)) {
4204 nv_update_stats(dev);
4205 spin_unlock(&np->hwstats_lock);
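/* re-arm below; round_jiffies() aligns the expiry to a whole second so
 * periodic timers across the system can batch their wakeups
 */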
4208 if (!np->in_shutdown)
4209 mod_timer(&np->stats_poll,
4210 round_jiffies(jiffies + STATS_INTERVAL));
4213 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4215 struct fe_priv *np = netdev_priv(dev);
4216 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
4217 strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
4218 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
4221 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4223 struct fe_priv *np = netdev_priv(dev);
4224 wolinfo->supported = WAKE_MAGIC;
4226 spin_lock_irq(&np->lock);
4227 if (np->wolenabled)
4228 wolinfo->wolopts = WAKE_MAGIC;
4229 spin_unlock_irq(&np->lock);
4232 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4234 struct fe_priv *np = netdev_priv(dev);
4235 u8 __iomem *base = get_hwbase(dev);
4236 u32 flags = 0;
4238 if (wolinfo->wolopts == 0) {
4239 np->wolenabled = 0;
4240 } else if (wolinfo->wolopts & WAKE_MAGIC) {
4241 np->wolenabled = 1;
4242 flags = NVREG_WAKEUPFLAGS_ENABLE;
4243 }
4244 if (netif_running(dev)) {
4245 spin_lock_irq(&np->lock);
4246 writel(flags, base + NvRegWakeUpFlags);
4247 spin_unlock_irq(&np->lock);
4249 device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
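/* keep the PM core's wakeup state in sync so WoL is armed on suspend */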
4253 static int nv_get_link_ksettings(struct net_device *dev,
4254 struct ethtool_link_ksettings *cmd)
4256 struct fe_priv *np = netdev_priv(dev);
4257 u32 speed, supported, advertising;
4258 int adv;
4260 spin_lock_irq(&np->lock);
4261 cmd->base.port = PORT_MII;
4262 if (!netif_running(dev)) {
4263 /* We do not track link speed / duplex setting if the
4264 * interface is disabled. Force a link check */
4265 if (nv_update_linkspeed(dev)) {
4266 netif_carrier_on(dev);
4267 } else {
4268 netif_carrier_off(dev);
4269 }
4270 }
4272 if (netif_carrier_ok(dev)) {
4273 switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
4274 case NVREG_LINKSPEED_10:
4275 speed = SPEED_10;
4276 break;
4277 case NVREG_LINKSPEED_100:
4278 speed = SPEED_100;
4279 break;
4280 case NVREG_LINKSPEED_1000:
4281 speed = SPEED_1000;
4282 break;
4283 default:
4284 speed = -1;
4285 break;
4286 }
4287 cmd->base.duplex = DUPLEX_HALF;
4288 if (np->duplex)
4289 cmd->base.duplex = DUPLEX_FULL;
4291 speed = SPEED_UNKNOWN;
4292 cmd->base.duplex = DUPLEX_UNKNOWN;
4294 cmd->base.speed = speed;
4295 cmd->base.autoneg = np->autoneg;
4297 advertising = ADVERTISED_MII;
4298 if (np->autoneg) {
4299 advertising |= ADVERTISED_Autoneg;
4300 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4301 if (adv & ADVERTISE_10HALF)
4302 advertising |= ADVERTISED_10baseT_Half;
4303 if (adv & ADVERTISE_10FULL)
4304 advertising |= ADVERTISED_10baseT_Full;
4305 if (adv & ADVERTISE_100HALF)
4306 advertising |= ADVERTISED_100baseT_Half;
4307 if (adv & ADVERTISE_100FULL)
4308 advertising |= ADVERTISED_100baseT_Full;
4309 if (np->gigabit == PHY_GIGABIT) {
4310 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4311 if (adv & ADVERTISE_1000FULL)
4312 advertising |= ADVERTISED_1000baseT_Full;
4315 supported = (SUPPORTED_Autoneg |
4316 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
4317 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
4318 SUPPORTED_MII);
4319 if (np->gigabit == PHY_GIGABIT)
4320 supported |= SUPPORTED_1000baseT_Full;
4322 cmd->base.phy_address = np->phyaddr;
4324 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
4325 supported);
4326 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
4327 advertising);
4329 /* ignore maxtxpkt, maxrxpkt for now */
4330 spin_unlock_irq(&np->lock);
4334 static int nv_set_link_ksettings(struct net_device *dev,
4335 const struct ethtool_link_ksettings *cmd)
4337 struct fe_priv *np = netdev_priv(dev);
4338 u32 speed = cmd->base.speed;
4339 u32 advertising;
4341 ethtool_convert_link_mode_to_legacy_u32(&advertising,
4342 cmd->link_modes.advertising);
4344 if (cmd->base.port != PORT_MII)
4345 return -EINVAL;
4346 if (cmd->base.phy_address != np->phyaddr) {
4347 /* TODO: support switching between multiple phys. Should be
4348 * trivial, but not enabled due to lack of test hardware. */
4351 if (cmd->base.autoneg == AUTONEG_ENABLE) {
4352 u32 mask;
4354 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4355 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
4356 if (np->gigabit == PHY_GIGABIT)
4357 mask |= ADVERTISED_1000baseT_Full;
4359 if ((advertising & mask) == 0)
4360 return -EINVAL;
4362 } else if (cmd->base.autoneg == AUTONEG_DISABLE) {
4363 /* Note: disabling autonegotiation at a forced speed of 1000 is
4364 * intentionally forbidden - no one should need that. */
4366 if (speed != SPEED_10 && speed != SPEED_100)
4367 return -EINVAL;
4368 if (cmd->base.duplex != DUPLEX_HALF &&
4369 cmd->base.duplex != DUPLEX_FULL)
4370 return -EINVAL;
4375 netif_carrier_off(dev);
4376 if (netif_running(dev)) {
4377 unsigned long flags;
4379 nv_disable_irq(dev);
4380 netif_tx_lock_bh(dev);
4381 netif_addr_lock(dev);
4382 /* with plain spinlock lockdep complains */
4383 spin_lock_irqsave(&np->lock, flags);
4386 * this can take some time, and interrupts are disabled
4387 * due to spin_lock_irqsave, but let's hope no daemon
4388 * is going to change the settings very often...
4389 * Worst case:
4390 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
4391 * + some minor delays, which is up to a second approximately
4394 spin_unlock_irqrestore(&np->lock, flags);
4395 netif_addr_unlock(dev);
4396 netif_tx_unlock_bh(dev);
4399 if (cmd->base.autoneg == AUTONEG_ENABLE) {
4404 /* advertise only what has been requested */
4405 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4406 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4407 if (advertising & ADVERTISED_10baseT_Half)
4408 adv |= ADVERTISE_10HALF;
4409 if (advertising & ADVERTISED_10baseT_Full)
4410 adv |= ADVERTISE_10FULL;
4411 if (advertising & ADVERTISED_100baseT_Half)
4412 adv |= ADVERTISE_100HALF;
4413 if (advertising & ADVERTISED_100baseT_Full)
4414 adv |= ADVERTISE_100FULL;
4415 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4416 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4417 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4418 adv |= ADVERTISE_PAUSE_ASYM;
4419 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4421 if (np->gigabit == PHY_GIGABIT) {
4422 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4423 adv &= ~ADVERTISE_1000FULL;
4424 if (advertising & ADVERTISED_1000baseT_Full)
4425 adv |= ADVERTISE_1000FULL;
4426 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4429 if (netif_running(dev))
4430 netdev_info(dev, "link down\n");
4431 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4432 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4433 bmcr |= BMCR_ANENABLE;
4434 /* reset the phy in order for settings to stick,
4435 * and cause autoneg to start */
4436 if (phy_reset(dev, bmcr)) {
4437 netdev_info(dev, "phy reset failed\n");
4441 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4442 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
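/* BMCR_ANRESTART kicks off a fresh negotiation using the advertisement
 * words programmed above
 */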
4449 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4450 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4451 if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_HALF)
4452 adv |= ADVERTISE_10HALF;
4453 if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_FULL)
4454 adv |= ADVERTISE_10FULL;
4455 if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_HALF)
4456 adv |= ADVERTISE_100HALF;
4457 if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_FULL)
4458 adv |= ADVERTISE_100FULL;
4459 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4460 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */
4461 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4462 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4464 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
4465 adv |= ADVERTISE_PAUSE_ASYM;
4466 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4468 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4469 np->fixed_mode = adv;
4471 if (np->gigabit == PHY_GIGABIT) {
4472 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4473 adv &= ~ADVERTISE_1000FULL;
4474 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4477 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4478 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
4479 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
4480 bmcr |= BMCR_FULLDPLX;
4481 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
4482 bmcr |= BMCR_SPEED100;
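/* BMCR_SPEED1000 stays cleared: forced gigabit was rejected earlier */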
4483 if (np->phy_oui == PHY_OUI_MARVELL) {
4484 /* reset the phy in order for forced mode settings to stick */
4485 if (phy_reset(dev, bmcr)) {
4486 netdev_info(dev, "phy reset failed\n");
4490 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4491 if (netif_running(dev)) {
4492 /* Wait a bit and then reconfigure the nic. */
4499 if (netif_running(dev)) {
4507 #define FORCEDETH_REGS_VER 1
4509 static int nv_get_regs_len(struct net_device *dev)
4511 struct fe_priv *np = netdev_priv(dev);
4512 return np->register_size;
4515 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
4517 struct fe_priv *np = netdev_priv(dev);
4518 u8 __iomem *base = get_hwbase(dev);
4522 regs->version = FORCEDETH_REGS_VER;
4523 spin_lock_irq(&np->lock);
4524 for (i = 0; i < np->register_size/sizeof(u32); i++)
4525 rbuf[i] = readl(base + i*sizeof(u32));
4526 spin_unlock_irq(&np->lock);
4529 static int nv_nway_reset(struct net_device *dev)
4531 struct fe_priv *np = netdev_priv(dev);
4537 netif_carrier_off(dev);
4538 if (netif_running(dev)) {
4539 nv_disable_irq(dev);
4540 netif_tx_lock_bh(dev);
4541 netif_addr_lock(dev);
4542 spin_lock(&np->lock);
4545 spin_unlock(&np->lock);
4546 netif_addr_unlock(dev);
4547 netif_tx_unlock_bh(dev);
4548 netdev_info(dev, "link down\n");
4551 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4552 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4553 bmcr |= BMCR_ANENABLE;
4554 /* reset the phy in order for settings to stick*/
4555 if (phy_reset(dev, bmcr)) {
4556 netdev_info(dev, "phy reset failed\n");
4560 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4561 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4564 if (netif_running(dev)) {
4576 static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
4578 struct fe_priv *np = netdev_priv(dev);
4580 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4581 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4583 ring->rx_pending = np->rx_ring_size;
4584 ring->tx_pending = np->tx_ring_size;
4587 static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
4589 struct fe_priv *np = netdev_priv(dev);
4590 u8 __iomem *base = get_hwbase(dev);
4591 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
4592 dma_addr_t ring_addr;
4594 if (ring->rx_pending < RX_RING_MIN ||
4595 ring->tx_pending < TX_RING_MIN ||
4596 ring->rx_mini_pending != 0 ||
4597 ring->rx_jumbo_pending != 0 ||
4598 (np->desc_ver == DESC_VER_1 &&
4599 (ring->rx_pending > RING_MAX_DESC_VER_1 ||
4600 ring->tx_pending > RING_MAX_DESC_VER_1)) ||
4601 (np->desc_ver != DESC_VER_1 &&
4602 (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
4603 ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
4607 /* allocate new rings */
4608 if (!nv_optimized(np)) {
4609 rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev,
4610 sizeof(struct ring_desc) *
4611 (ring->rx_pending +
4612 ring->tx_pending),
4613 &ring_addr, GFP_ATOMIC);
4615 rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev,
4616 sizeof(struct ring_desc_ex) *
4617 (ring->rx_pending +
4618 ring->tx_pending),
4619 &ring_addr, GFP_ATOMIC);
4621 rx_skbuff = kmalloc_array(ring->rx_pending, sizeof(struct nv_skb_map),
4623 tx_skbuff = kmalloc_array(ring->tx_pending, sizeof(struct nv_skb_map),
4625 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
4626 /* fall back to old rings */
4627 if (!nv_optimized(np)) {
4629 dma_free_coherent(&np->pci_dev->dev,
4630 sizeof(struct ring_desc) *
4631 (ring->rx_pending +
4632 ring->tx_pending),
4633 rxtx_ring, ring_addr);
4636 dma_free_coherent(&np->pci_dev->dev,
4637 sizeof(struct ring_desc_ex) *
4638 (ring->rx_pending +
4639 ring->tx_pending),
4640 rxtx_ring, ring_addr);
4648 if (netif_running(dev)) {
4649 nv_disable_irq(dev);
4650 nv_napi_disable(dev);
4651 netif_tx_lock_bh(dev);
4652 netif_addr_lock(dev);
4653 spin_lock(&np->lock);
4663 /* set new values */
4664 np->rx_ring_size = ring->rx_pending;
4665 np->tx_ring_size = ring->tx_pending;
4667 if (!nv_optimized(np)) {
4668 np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
4669 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4671 np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
4672 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4674 np->rx_skb = (struct nv_skb_map *)rx_skbuff;
4675 np->tx_skb = (struct nv_skb_map *)tx_skbuff;
4676 np->ring_addr = ring_addr;
4678 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4679 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4681 if (netif_running(dev)) {
4682 /* reinit driver view of the queues */
4684 if (nv_init_ring(dev)) {
4685 if (!np->in_shutdown)
4686 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4689 /* reinit nic view of the queues */
4690 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4691 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4692 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4693 base + NvRegRingSizes);
4695 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4698 /* restart engines */
4700 spin_unlock(&np->lock);
4701 netif_addr_unlock(dev);
4702 netif_tx_unlock_bh(dev);
4703 nv_napi_enable(dev);
4711 static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4713 struct fe_priv *np = netdev_priv(dev);
4715 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4716 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4717 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
4720 static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4722 struct fe_priv *np = netdev_priv(dev);
4725 if ((!np->autoneg && np->duplex == 0) ||
4726 (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4727 netdev_info(dev, "can not set pause settings when forced link is in half duplex\n");
4730 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4731 netdev_info(dev, "hardware does not support tx pause frames\n");
4735 netif_carrier_off(dev);
4736 if (netif_running(dev)) {
4737 nv_disable_irq(dev);
4738 netif_tx_lock_bh(dev);
4739 netif_addr_lock(dev);
4740 spin_lock(&np->lock);
4743 spin_unlock(&np->lock);
4744 netif_addr_unlock(dev);
4745 netif_tx_unlock_bh(dev);
4748 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4749 if (pause->rx_pause)
4750 np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4751 if (pause->tx_pause)
4752 np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4754 if (np->autoneg && pause->autoneg) {
4755 np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4757 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4758 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4759 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4760 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4761 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4762 adv |= ADVERTISE_PAUSE_ASYM;
4763 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4765 if (netif_running(dev))
4766 netdev_info(dev, "link down\n");
4767 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4768 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4769 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4771 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4772 if (pause->rx_pause)
4773 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4774 if (pause->tx_pause)
4775 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4777 if (!netif_running(dev))
4778 nv_update_linkspeed(dev);
4780 nv_update_pause(dev, np->pause_flags);
4783 if (netif_running(dev)) {
4790 static int nv_set_loopback(struct net_device *dev, netdev_features_t features)
4792 struct fe_priv *np = netdev_priv(dev);
4793 unsigned long flags;
4795 int err, retval = 0;
4797 spin_lock_irqsave(&np->lock, flags);
4798 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4799 if (features & NETIF_F_LOOPBACK) {
4800 if (miicontrol & BMCR_LOOPBACK) {
4801 spin_unlock_irqrestore(&np->lock, flags);
4802 netdev_info(dev, "Loopback already enabled\n");
4805 nv_disable_irq(dev);
4806 /* Turn on loopback mode */
4807 miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
4808 err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol);
4811 spin_unlock_irqrestore(&np->lock, flags);
4814 if (netif_running(dev)) {
4815 /* Force 1000 Mbps full-duplex */
4816 nv_force_linkspeed(dev, NVREG_LINKSPEED_1000,
4819 netif_carrier_on(dev);
4821 spin_unlock_irqrestore(&np->lock, flags);
4823 "Internal PHY loopback mode enabled.\n");
4826 if (!(miicontrol & BMCR_LOOPBACK)) {
4827 spin_unlock_irqrestore(&np->lock, flags);
4828 netdev_info(dev, "Loopback already disabled\n");
4831 nv_disable_irq(dev);
4832 /* Turn off loopback */
4833 spin_unlock_irqrestore(&np->lock, flags);
4834 netdev_info(dev, "Internal PHY loopback mode disabled.\n");
4838 spin_lock_irqsave(&np->lock, flags);
4840 spin_unlock_irqrestore(&np->lock, flags);
4845 static netdev_features_t nv_fix_features(struct net_device *dev,
4846 netdev_features_t features)
4848 /* vlan is dependent on rx checksum offload */
4849 if (features & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
4850 features |= NETIF_F_RXCSUM;
4855 static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
4857 struct fe_priv *np = get_nvpriv(dev);
4859 spin_lock_irq(&np->lock);
4861 if (features & NETIF_F_HW_VLAN_CTAG_RX)
4862 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
4863 else
4864 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
4866 if (features & NETIF_F_HW_VLAN_CTAG_TX)
4867 np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
4868 else
4869 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
4871 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4873 spin_unlock_irq(&np->lock);
4876 static int nv_set_features(struct net_device *dev, netdev_features_t features)
4878 struct fe_priv *np = netdev_priv(dev);
4879 u8 __iomem *base = get_hwbase(dev);
4880 netdev_features_t changed = dev->features ^ features;
4883 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) {
4884 retval = nv_set_loopback(dev, features);
4889 if (changed & NETIF_F_RXCSUM) {
4890 spin_lock_irq(&np->lock);
4892 if (features & NETIF_F_RXCSUM)
4893 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4894 else
4895 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
4897 if (netif_running(dev))
4898 writel(np->txrxctl_bits, base + NvRegTxRxControl);
4900 spin_unlock_irq(&np->lock);
4903 if (changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX))
4904 nv_vlan_mode(dev, features);
4909 static int nv_get_sset_count(struct net_device *dev, int sset)
4911 struct fe_priv *np = netdev_priv(dev);
4915 if (np->driver_data & DEV_HAS_TEST_EXTENDED)
4916 return NV_TEST_COUNT_EXTENDED;
4917 else
4918 return NV_TEST_COUNT_BASE;
4920 if (np->driver_data & DEV_HAS_STATISTICS_V3)
4921 return NV_DEV_STATISTICS_V3_COUNT;
4922 else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4923 return NV_DEV_STATISTICS_V2_COUNT;
4924 else if (np->driver_data & DEV_HAS_STATISTICS_V1)
4925 return NV_DEV_STATISTICS_V1_COUNT;
4926 else
4927 return 0;
4933 static void nv_get_ethtool_stats(struct net_device *dev,
4934 struct ethtool_stats *estats, u64 *buffer)
4935 __acquires(&netdev_priv(dev)->hwstats_lock)
4936 __releases(&netdev_priv(dev)->hwstats_lock)
4938 struct fe_priv *np = netdev_priv(dev);
4940 spin_lock_bh(&np->hwstats_lock);
4941 nv_update_stats(dev);
4942 memcpy(buffer, &np->estats,
4943 nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
4944 spin_unlock_bh(&np->hwstats_lock);
4947 static int nv_link_test(struct net_device *dev)
4949 struct fe_priv *np = netdev_priv(dev);
4952 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4953 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4955 /* check phy link status */
4956 if (!(mii_status & BMSR_LSTATUS))
4957 return 0;
4958 else
4959 return 1;
4962 static int nv_register_test(struct net_device *dev)
4964 u8 __iomem *base = get_hwbase(dev);
4966 u32 orig_read, new_read;
4969 orig_read = readl(base + nv_registers_test[i].reg);
4971 /* xor with mask to toggle bits */
4972 orig_read ^= nv_registers_test[i].mask;
4974 writel(orig_read, base + nv_registers_test[i].reg);
4976 new_read = readl(base + nv_registers_test[i].reg);
4978 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
4979 return 0;
4981 /* restore original value */
4982 orig_read ^= nv_registers_test[i].mask;
4983 writel(orig_read, base + nv_registers_test[i].reg);
4985 } while (nv_registers_test[++i].reg != 0);
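/* nv_registers_test[] is terminated by an entry with a zero register
 * offset
 */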
4990 static int nv_interrupt_test(struct net_device *dev)
4992 struct fe_priv *np = netdev_priv(dev);
4993 u8 __iomem *base = get_hwbase(dev);
4996 u32 save_msi_flags, save_poll_interval = 0;
4998 if (netif_running(dev)) {
4999 /* free current irq */
5001 save_poll_interval = readl(base+NvRegPollingInterval);
5004 /* flag to test interrupt handler */
5007 /* setup test irq */
5008 save_msi_flags = np->msi_flags;
5009 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
5010 np->msi_flags |= 0x001; /* setup 1 vector */
5011 if (nv_request_irq(dev, 1))
5012 return 0;
5014 /* setup timer interrupt */
5015 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5016 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5018 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
5020 /* wait for at least one interrupt */
5023 spin_lock_irq(&np->lock);
5025 /* flag should be set within ISR */
5026 testcnt = np->intr_test;
5030 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
5031 if (!(np->msi_flags & NV_MSI_X_ENABLED))
5032 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5033 else
5034 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
5036 spin_unlock_irq(&np->lock);
5040 np->msi_flags = save_msi_flags;
5042 if (netif_running(dev)) {
5043 writel(save_poll_interval, base + NvRegPollingInterval);
5044 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5045 /* restore original irq */
5046 if (nv_request_irq(dev, 0))
5047 return 0;
5053 static int nv_loopback_test(struct net_device *dev)
5055 struct fe_priv *np = netdev_priv(dev);
5056 u8 __iomem *base = get_hwbase(dev);
5057 struct sk_buff *tx_skb, *rx_skb;
5058 dma_addr_t test_dma_addr;
5059 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
5061 int len, i, pkt_len;
5063 u32 filter_flags = 0;
5064 u32 misc1_flags = 0;
5067 if (netif_running(dev)) {
5068 nv_disable_irq(dev);
5069 filter_flags = readl(base + NvRegPacketFilterFlags);
5070 misc1_flags = readl(base + NvRegMisc1);
5075 /* reinit driver view of the rx queue */
5076 set_bufsize(dev);
5077 nv_init_ring(dev);
5079 /* setup hardware for loopback */
5080 writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
5081 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
5083 /* reinit nic view of the rx queue */
5084 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5085 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5086 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5087 base + NvRegRingSizes);
5090 /* restart rx engine */
5091 nv_start_rxtx(dev);
5093 /* setup packet for tx */
5094 pkt_len = ETH_DATA_LEN;
5095 tx_skb = netdev_alloc_skb(dev, pkt_len);
5100 test_dma_addr = dma_map_single(&np->pci_dev->dev, tx_skb->data,
5101 skb_tailroom(tx_skb),
5103 if (unlikely(dma_mapping_error(&np->pci_dev->dev,
5105 dev_kfree_skb_any(tx_skb);
5108 pkt_data = skb_put(tx_skb, pkt_len);
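/* fill the payload with an incrementing byte pattern; the rx side of
 * the loop verifies it byte for byte further down
 */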
5109 for (i = 0; i < pkt_len; i++)
5110 pkt_data[i] = (u8)(i & 0xff);
5112 if (!nv_optimized(np)) {
5113 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
5114 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
5116 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
5117 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
5118 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
5120 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5121 pci_push(get_hwbase(dev));
5125 /* check for rx of the packet */
5126 if (!nv_optimized(np)) {
5127 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
5128 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
5131 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
5132 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
5135 if (flags & NV_RX_AVAIL) {
5137 } else if (np->desc_ver == DESC_VER_1) {
5138 if (flags & NV_RX_ERROR)
5141 if (flags & NV_RX2_ERROR)
5146 if (len != pkt_len) {
5149 rx_skb = np->rx_skb[0].skb;
5150 for (i = 0; i < pkt_len; i++) {
5151 if (rx_skb->data[i] != (u8)(i & 0xff)) {
5159 dma_unmap_single(&np->pci_dev->dev, test_dma_addr,
5160 (skb_end_pointer(tx_skb) - tx_skb->data),
5162 dev_kfree_skb_any(tx_skb);
5167 /* drain rx queue */
5168 nv_drain_rxtx(dev);
5170 if (netif_running(dev)) {
5171 writel(misc1_flags, base + NvRegMisc1);
5172 writel(filter_flags, base + NvRegPacketFilterFlags);
5179 static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
5181 struct fe_priv *np = netdev_priv(dev);
5182 u8 __iomem *base = get_hwbase(dev);
5185 count = nv_get_sset_count(dev, ETH_SS_TEST);
5186 memset(buffer, 0, count * sizeof(u64));
5188 if (!nv_link_test(dev)) {
5189 test->flags |= ETH_TEST_FL_FAILED;
5193 if (test->flags & ETH_TEST_FL_OFFLINE) {
5194 if (netif_running(dev)) {
5195 netif_stop_queue(dev);
5196 nv_napi_disable(dev);
5197 netif_tx_lock_bh(dev);
5198 netif_addr_lock(dev);
5199 spin_lock_irq(&np->lock);
5200 nv_disable_hw_interrupts(dev, np->irqmask);
5201 if (!(np->msi_flags & NV_MSI_X_ENABLED))
5202 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5203 else
5204 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
5208 /* drain rx queue */
5209 nv_drain_rxtx(dev);
5210 spin_unlock_irq(&np->lock);
5211 netif_addr_unlock(dev);
5212 netif_tx_unlock_bh(dev);
5215 if (!nv_register_test(dev)) {
5216 test->flags |= ETH_TEST_FL_FAILED;
5220 result = nv_interrupt_test(dev);
5222 test->flags |= ETH_TEST_FL_FAILED;
5230 if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) {
5231 test->flags |= ETH_TEST_FL_FAILED;
5235 if (netif_running(dev)) {
5236 /* reinit driver view of the rx queue */
5237 set_bufsize(dev);
5238 if (nv_init_ring(dev)) {
5239 if (!np->in_shutdown)
5240 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5242 /* reinit nic view of the rx queue */
5243 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5244 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5245 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5246 base + NvRegRingSizes);
5248 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5250 /* restart rx engine */
5251 nv_start_rxtx(dev);
5252 netif_start_queue(dev);
5253 nv_napi_enable(dev);
5254 nv_enable_hw_interrupts(dev, np->irqmask);
5259 static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
5261 switch (stringset) {
5263 memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
5266 memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
5271 static const struct ethtool_ops ops = {
5272 .get_drvinfo = nv_get_drvinfo,
5273 .get_link = ethtool_op_get_link,
5274 .get_wol = nv_get_wol,
5275 .set_wol = nv_set_wol,
5276 .get_regs_len = nv_get_regs_len,
5277 .get_regs = nv_get_regs,
5278 .nway_reset = nv_nway_reset,
5279 .get_ringparam = nv_get_ringparam,
5280 .set_ringparam = nv_set_ringparam,
5281 .get_pauseparam = nv_get_pauseparam,
5282 .set_pauseparam = nv_set_pauseparam,
5283 .get_strings = nv_get_strings,
5284 .get_ethtool_stats = nv_get_ethtool_stats,
5285 .get_sset_count = nv_get_sset_count,
5286 .self_test = nv_self_test,
5287 .get_ts_info = ethtool_op_get_ts_info,
5288 .get_link_ksettings = nv_get_link_ksettings,
5289 .set_link_ksettings = nv_set_link_ksettings,
5292 /* The mgmt unit and driver use a semaphore to access the phy during init */
5293 static int nv_mgmt_acquire_sema(struct net_device *dev)
5295 struct fe_priv *np = netdev_priv(dev);
5296 u8 __iomem *base = get_hwbase(dev);
5298 u32 tx_ctrl, mgmt_sema;
5300 for (i = 0; i < 10; i++) {
5301 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
5302 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
5307 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
5308 return 0;
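/* the semaphore is advisory: set the host-acquire bit, then read back
 * to confirm the mgmt unit did not take it in the meantime
 */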
5310 for (i = 0; i < 2; i++) {
5311 tx_ctrl = readl(base + NvRegTransmitterControl);
5312 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
5313 writel(tx_ctrl, base + NvRegTransmitterControl);
5315 /* verify that semaphore was acquired */
5316 tx_ctrl = readl(base + NvRegTransmitterControl);
5317 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
5318 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
5328 static void nv_mgmt_release_sema(struct net_device *dev)
5330 struct fe_priv *np = netdev_priv(dev);
5331 u8 __iomem *base = get_hwbase(dev);
5334 if (np->driver_data & DEV_HAS_MGMT_UNIT) {
5335 if (np->mgmt_sema) {
5336 tx_ctrl = readl(base + NvRegTransmitterControl);
5337 tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
5338 writel(tx_ctrl, base + NvRegTransmitterControl);
5344 static int nv_mgmt_get_version(struct net_device *dev)
5346 struct fe_priv *np = netdev_priv(dev);
5347 u8 __iomem *base = get_hwbase(dev);
5348 u32 data_ready = readl(base + NvRegTransmitterControl);
5349 u32 data_ready2 = 0;
5350 unsigned long start;
5353 writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
5354 writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
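/* toggling DATA_START latches the request; the mgmt unit answers by
 * toggling DATA_READY, which is polled below for up to 5 seconds
 */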
5356 while (time_before(jiffies, start + 5*HZ)) {
5357 data_ready2 = readl(base + NvRegTransmitterControl);
5358 if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
5362 schedule_timeout_uninterruptible(1);
5365 if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR))
5368 np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;
5373 static int nv_open(struct net_device *dev)
5375 struct fe_priv *np = netdev_priv(dev);
5376 u8 __iomem *base = get_hwbase(dev);
5382 mii_rw(dev, np->phyaddr, MII_BMCR,
5383 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
5385 nv_txrx_gate(dev, false);
5386 /* erase previous misconfiguration */
5387 if (np->driver_data & DEV_HAS_POWER_CNTRL)
5389 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5390 writel(0, base + NvRegMulticastAddrB);
5391 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5392 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5393 writel(0, base + NvRegPacketFilterFlags);
5395 writel(0, base + NvRegTransmitterControl);
5396 writel(0, base + NvRegReceiverControl);
5398 writel(0, base + NvRegAdapterControl);
5400 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
5401 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
5403 /* initialize descriptor rings */
5405 oom = nv_init_ring(dev);
5407 writel(0, base + NvRegLinkSpeed);
5408 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5410 writel(0, base + NvRegUnknownSetupReg6);
5412 np->in_shutdown = 0;
5415 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5416 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5417 base + NvRegRingSizes);
5419 writel(np->linkspeed, base + NvRegLinkSpeed);
5420 if (np->desc_ver == DESC_VER_1)
5421 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
5422 else
5423 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
5424 writel(np->txrxctl_bits, base + NvRegTxRxControl);
5425 writel(np->vlanctl_bits, base + NvRegVlanControl);
5427 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
5428 if (reg_delay(dev, NvRegUnknownSetupReg5,
5429 NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
5430 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
5432 "%s: SetupReg5, Bit 31 remained off\n", __func__);
5434 writel(0, base + NvRegMIIMask);
5435 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5436 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5438 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
5439 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
5440 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
5441 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5443 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
5445 get_random_bytes(&low, sizeof(low));
5446 low &= NVREG_SLOTTIME_MASK;
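/* randomize the backoff slot time so nics brought up together do not
 * retry collisions in lockstep
 */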
5447 if (np->desc_ver == DESC_VER_1) {
5448 writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
5449 } else {
5450 if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
5451 /* setup legacy backoff */
5452 writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
5453 } else {
5454 writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
5455 nv_gear_backoff_reseed(dev);
5458 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
5459 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
5460 if (poll_interval == -1) {
5461 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
5462 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
5463 else
5464 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5465 } else
5466 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
5467 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5468 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
5469 base + NvRegAdapterControl);
5470 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
5471 writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
5473 writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
5475 i = readl(base + NvRegPowerState);
5476 if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
5477 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
5481 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
5483 nv_disable_hw_interrupts(dev, np->irqmask);
5485 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5486 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5489 if (nv_request_irq(dev, 0))
5492 /* ask for interrupts */
5493 nv_enable_hw_interrupts(dev, np->irqmask);
5495 spin_lock_irq(&np->lock);
5496 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5497 writel(0, base + NvRegMulticastAddrB);
5498 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5499 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5500 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5501 /* One manual link speed update: Interrupts are enabled, future link
5502 * speed changes cause interrupts and are handled by nv_link_irq().
5504 readl(base + NvRegMIIStatus);
5505 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5507 /* set linkspeed to invalid value, thus force nv_update_linkspeed
5510 ret = nv_update_linkspeed(dev);
5512 netif_start_queue(dev);
5513 nv_napi_enable(dev);
5515 if (ret) {
5516 netif_carrier_on(dev);
5517 } else {
5518 netdev_info(dev, "no link during initialization\n");
5519 netif_carrier_off(dev);
5520 }
5522 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5524 /* start statistics timer */
5525 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5526 mod_timer(&np->stats_poll,
5527 round_jiffies(jiffies + STATS_INTERVAL));
5529 spin_unlock_irq(&np->lock);
5531 /* If the loopback feature was set while the device was down, make sure
5532 * that it's set correctly now.
5534 if (dev->features & NETIF_F_LOOPBACK)
5535 nv_set_loopback(dev, dev->features);
5543 static int nv_close(struct net_device *dev)
5545 struct fe_priv *np = netdev_priv(dev);
5548 spin_lock_irq(&np->lock);
5549 np->in_shutdown = 1;
5550 spin_unlock_irq(&np->lock);
5551 nv_napi_disable(dev);
5552 synchronize_irq(np->pci_dev->irq);
5554 del_timer_sync(&np->oom_kick);
5555 del_timer_sync(&np->nic_poll);
5556 del_timer_sync(&np->stats_poll);
5558 netif_stop_queue(dev);
5559 spin_lock_irq(&np->lock);
5560 nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */
5564 /* disable interrupts on the nic or we will lock up */
5565 base = get_hwbase(dev);
5566 nv_disable_hw_interrupts(dev, np->irqmask);
5569 spin_unlock_irq(&np->lock);
5575 if (np->wolenabled || !phy_power_down) {
5576 nv_txrx_gate(dev, false);
5577 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5580 /* power down phy */
5581 mii_rw(dev, np->phyaddr, MII_BMCR,
5582 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
5583 nv_txrx_gate(dev, true);
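/* with the phy powered down, gate the tx/rx clocks (where the hardware
 * supports it) to save a little more power
 */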
5586 /* FIXME: power down nic */
5591 static const struct net_device_ops nv_netdev_ops = {
5592 .ndo_open = nv_open,
5593 .ndo_stop = nv_close,
5594 .ndo_get_stats64 = nv_get_stats64,
5595 .ndo_start_xmit = nv_start_xmit,
5596 .ndo_tx_timeout = nv_tx_timeout,
5597 .ndo_change_mtu = nv_change_mtu,
5598 .ndo_fix_features = nv_fix_features,
5599 .ndo_set_features = nv_set_features,
5600 .ndo_validate_addr = eth_validate_addr,
5601 .ndo_set_mac_address = nv_set_mac_address,
5602 .ndo_set_rx_mode = nv_set_multicast,
5603 #ifdef CONFIG_NET_POLL_CONTROLLER
5604 .ndo_poll_controller = nv_poll_controller,
5608 static const struct net_device_ops nv_netdev_ops_optimized = {
5609 .ndo_open = nv_open,
5610 .ndo_stop = nv_close,
5611 .ndo_get_stats64 = nv_get_stats64,
5612 .ndo_start_xmit = nv_start_xmit_optimized,
5613 .ndo_tx_timeout = nv_tx_timeout,
5614 .ndo_change_mtu = nv_change_mtu,
5615 .ndo_fix_features = nv_fix_features,
5616 .ndo_set_features = nv_set_features,
5617 .ndo_validate_addr = eth_validate_addr,
5618 .ndo_set_mac_address = nv_set_mac_address,
5619 .ndo_set_rx_mode = nv_set_multicast,
5620 #ifdef CONFIG_NET_POLL_CONTROLLER
5621 .ndo_poll_controller = nv_poll_controller,
5625 static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5627 struct net_device *dev;
5632 u32 powerstate, txreg;
5633 u32 phystate_orig = 0, phystate;
5634 int phyinitialized = 0;
5635 static int printed_version;
5637 if (!printed_version++)
5638 pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
5641 dev = alloc_etherdev(sizeof(struct fe_priv));
5646 np = netdev_priv(dev);
5648 np->pci_dev = pci_dev;
5649 spin_lock_init(&np->lock);
5650 spin_lock_init(&np->hwstats_lock);
5651 SET_NETDEV_DEV(dev, &pci_dev->dev);
5652 u64_stats_init(&np->swstats_rx_syncp);
5653 u64_stats_init(&np->swstats_tx_syncp);
5655 timer_setup(&np->oom_kick, nv_do_rx_refill, 0);
5656 timer_setup(&np->nic_poll, nv_do_nic_poll, 0);
5657 timer_setup(&np->stats_poll, nv_do_stats_poll, TIMER_DEFERRABLE);
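/* TIMER_DEFERRABLE: the stats refresh may ride along with the next
 * wakeup instead of forcing one on an idle cpu
 */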
5659 err = pci_enable_device(pci_dev);
5663 pci_set_master(pci_dev);
5665 err = pci_request_regions(pci_dev, DRV_NAME);
5669 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5670 np->register_size = NV_PCI_REGSZ_VER3;
5671 else if (id->driver_data & DEV_HAS_STATISTICS_V1)
5672 np->register_size = NV_PCI_REGSZ_VER2;
5674 np->register_size = NV_PCI_REGSZ_VER1;
5678 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5679 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
5680 pci_resource_len(pci_dev, i) >= np->register_size) {
5681 addr = pci_resource_start(pci_dev, i);
5685 if (i == DEVICE_COUNT_RESOURCE) {
5686 dev_info(&pci_dev->dev, "Couldn't find register window\n");
5690 /* copy of driver data */
5691 np->driver_data = id->driver_data;
5692 /* copy of device id */
5693 np->device_id = id->device;
5695 /* handle different descriptor versions */
5696 if (id->driver_data & DEV_HAS_HIGH_DMA) {
5697 /* packet format 3: supports 40-bit addressing */
5698 np->desc_ver = DESC_VER_3;
5699 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
5701 if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
5702 dev_info(&pci_dev->dev,
5703 "64-bit DMA failed, using 32-bit addressing\n");
5705 dev->features |= NETIF_F_HIGHDMA;
5706 if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
5707 dev_info(&pci_dev->dev,
5708 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
5711 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
5712 /* packet format 2: supports jumbo frames */
5713 np->desc_ver = DESC_VER_2;
5714 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
5716 /* original packet format */
5717 np->desc_ver = DESC_VER_1;
5718 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
5721 np->pkt_limit = NV_PKTLIMIT_1;
5722 if (id->driver_data & DEV_HAS_LARGEDESC)
5723 np->pkt_limit = NV_PKTLIMIT_2;
5725 if (id->driver_data & DEV_HAS_CHECKSUM) {
5726 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5727 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
5728 NETIF_F_TSO | NETIF_F_RXCSUM;
5731 np->vlanctl_bits = 0;
5732 if (id->driver_data & DEV_HAS_VLAN) {
5733 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5734 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
5735 NETIF_F_HW_VLAN_CTAG_TX;
5738 dev->features |= dev->hw_features;
5740 /* Add loopback capability to the device. */
5741 dev->hw_features |= NETIF_F_LOOPBACK;
5743 /* MTU range: 64 - 1500 or 9100 */
5744 dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
5745 dev->max_mtu = np->pkt_limit;
5747 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
5748 if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
5749 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
5750 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
5751 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
5755 np->base = ioremap(addr, np->register_size);
5756 if (!np->base)
5757 goto out_relreg;
5759 np->rx_ring_size = RX_RING_DEFAULT;
5760 np->tx_ring_size = TX_RING_DEFAULT;
5762 if (!nv_optimized(np)) {
5763 np->rx_ring.orig = dma_alloc_coherent(&pci_dev->dev,
5764 sizeof(struct ring_desc) *
5765 (np->rx_ring_size +
5766 np->tx_ring_size),
5767 &np->ring_addr,
5768 GFP_KERNEL);
5769 if (!np->rx_ring.orig)
5770 goto out_unmap;
5771 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
5773 np->rx_ring.ex = dma_alloc_coherent(&pci_dev->dev,
5774 sizeof(struct ring_desc_ex) *
5775 (np->rx_ring_size +
5776 np->tx_ring_size),
5777 &np->ring_addr, GFP_KERNEL);
5778 if (!np->rx_ring.ex)
5779 goto out_unmap;
5780 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
5782 np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5783 np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5784 if (!np->rx_skb || !np->tx_skb)
5785 goto out_freering;
5787 if (!nv_optimized(np))
5788 dev->netdev_ops = &nv_netdev_ops;
5790 dev->netdev_ops = &nv_netdev_ops_optimized;
5792 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
5793 dev->ethtool_ops = &ops;
5794 dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5796 pci_set_drvdata(pci_dev, dev);
5798 /* read the mac address */
5799 base = get_hwbase(dev);
5800 np->orig_mac[0] = readl(base + NvRegMacAddrA);
5801 np->orig_mac[1] = readl(base + NvRegMacAddrB);
5803 /* check the workaround bit for correct mac address order */
5804 txreg = readl(base + NvRegTransmitPoll);
5805 if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
5806 /* mac address is already in correct order */
5807 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
5808 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
5809 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5810 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5811 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5812 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
5813 } else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
5814 /* mac address is already in correct order */
5815 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
5816 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
5817 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5818 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5819 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5820 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
5822 * Set orig mac address back to the reversed version.
5823 * This flag will be cleared during low power transition.
5824 * Therefore, we should always put back the reversed address.
5826 np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
5827 (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
5828 np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
5830 /* need to reverse mac address to correct order */
5831 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
5832 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
5833 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
5834 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
5835 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
5836 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
5837 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5838 dev_dbg(&pci_dev->dev,
5839 "%s: set workaround bit for reversed mac addr\n",
5843 if (!is_valid_ether_addr(dev->dev_addr)) {
5845 * Bad mac address. At least one bios sets the mac address
5846 * to 01:23:45:67:89:ab
5848 dev_err(&pci_dev->dev,
5849 "Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
5851 eth_hw_addr_random(dev);
5852 dev_err(&pci_dev->dev,
5853 "Using random MAC address: %pM\n", dev->dev_addr);
5856 /* set mac address */
5857 nv_copy_mac_to_hw(dev);
5860 writel(0, base + NvRegWakeUpFlags);
5862 device_set_wakeup_enable(&pci_dev->dev, false);
5864 if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5866 /* take phy and nic out of low power mode */
5867 powerstate = readl(base + NvRegPowerState2);
5868 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
5869 if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
5870 pci_dev->revision >= 0xA3)
5871 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5872 writel(powerstate, base + NvRegPowerState2);
5875 if (np->desc_ver == DESC_VER_1)
5876 np->tx_flags = NV_TX_VALID;
5878 np->tx_flags = NV_TX2_VALID;
5881 if ((id->driver_data & DEV_HAS_MSI) && msi)
5882 np->msi_flags |= NV_MSI_CAPABLE;
5884 if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5885 /* msix has had reported issues when modifying the irqmask,
5886 as in the case of napi; therefore, disable it for now
5887 */
5888 #if 0
5889 np->msi_flags |= NV_MSI_X_CAPABLE;
5890 #endif
5891 }
5893 if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
5894 np->irqmask = NVREG_IRQMASK_CPU;
5895 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5896 np->msi_flags |= 0x0001;
5897 } else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
5898 !(id->driver_data & DEV_NEED_TIMERIRQ)) {
5899 /* start off in throughput mode */
5900 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5901 /* remove support for msix mode */
5902 np->msi_flags &= ~NV_MSI_X_CAPABLE;
5904 optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
5905 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5906 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5907 np->msi_flags |= 0x0003;
5910 if (id->driver_data & DEV_NEED_TIMERIRQ)
5911 np->irqmask |= NVREG_IRQ_TIMER;
5912 if (id->driver_data & DEV_NEED_LINKTIMER) {
5913 np->need_linktimer = 1;
5914 np->link_timeout = jiffies + LINK_TIMEOUT;
5916 np->need_linktimer = 0;
5919 /* Limit the number of outstanding tx packets to work around a hw bug */
5920 if (id->driver_data & DEV_NEED_TX_LIMIT) {
5922 if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
5923 pci_dev->revision >= 0xA2)
5927 /* clear phy state and temporarily halt phy interrupts */
5928 writel(0, base + NvRegMIIMask);
5929 phystate = readl(base + NvRegAdapterControl);
5930 if (phystate & NVREG_ADAPTCTL_RUNNING) {
5932 phystate &= ~NVREG_ADAPTCTL_RUNNING;
5933 writel(phystate, base + NvRegAdapterControl);
5935 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5937 if (id->driver_data & DEV_HAS_MGMT_UNIT) {
5938 /* management unit running on the mac? */
5939 if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
5940 (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
5941 nv_mgmt_acquire_sema(dev) &&
5942 nv_mgmt_get_version(dev)) {
5944 if (np->mgmt_version > 0)
5945 np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
5946 /* management unit setup the phy already? */
5947 if (np->mac_in_use &&
5948 ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5949 NVREG_XMITCTL_SYNC_PHY_INIT)) {
5950 /* phy is inited by mgmt unit */
5953 /* we need to init the phy */
5958 /* find a suitable phy */
5959 for (i = 1; i <= 32; i++) {
5961 int phyaddr = i & 0x1F;
5963 spin_lock_irq(&np->lock);
5964 id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
5965 spin_unlock_irq(&np->lock);
5966 if (id1 < 0 || id1 == 0xffff)
5967 continue;
5968 spin_lock_irq(&np->lock);
5969 id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
5970 spin_unlock_irq(&np->lock);
5971 if (id2 < 0 || id2 == 0xffff)
5972 continue;
5974 np->phy_model = id2 & PHYID2_MODEL_MASK;
5975 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
5976 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
5977 np->phyaddr = phyaddr;
5978 np->phy_oui = id1 | id2;
5980 /* Realtek hardcoded phy id1 to all zeros on certain phys */
5981 if (np->phy_oui == PHY_OUI_REALTEK2)
5982 np->phy_oui = PHY_OUI_REALTEK;
5983 /* Setup phy revision for Realtek */
5984 if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
5985 np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
5990 dev_info(&pci_dev->dev, "open: Could not find a valid PHY\n");
5994 if (!phyinitialized) {
5998 /* see if it is a gigabit phy */
5999 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
6000 if (mii_status & PHY_GIGABIT)
6001 np->gigabit = PHY_GIGABIT;
6004 /* set default link speed settings */
6005 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
6009 err = register_netdev(dev);
6011 dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);
6015 netif_carrier_off(dev);
6017 /* Some NICs freeze when TX pause is enabled while NIC is
6018 * down, and this stays across warm reboots. The sequence
6019 * below should be enough to recover from that state.
6021 nv_update_pause(dev, 0);
6025 if (id->driver_data & DEV_HAS_VLAN)
6026 nv_vlan_mode(dev, dev->features);
6028 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
6029 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
6031 dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
6032 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
6033 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
6034 "csum " : "",
6035 dev->features & (NETIF_F_HW_VLAN_CTAG_RX |
6036 NETIF_F_HW_VLAN_CTAG_TX) ?
6037 "vlan " : "",
6038 dev->features & (NETIF_F_LOOPBACK) ?
6039 "loopback " : "",
6040 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
6041 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
6042 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
6043 np->gigabit == PHY_GIGABIT ? "gbit " : "",
6044 np->need_linktimer ? "lnktim " : "",
6045 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
6046 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
6053 writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
6057 iounmap(get_hwbase(dev));
6059 pci_release_regions(pci_dev);
6061 pci_disable_device(pci_dev);
6068 static void nv_restore_phy(struct net_device *dev)
6070 struct fe_priv *np = netdev_priv(dev);
6071 u16 phy_reserved, mii_control;
6073 if (np->phy_oui == PHY_OUI_REALTEK &&
6074 np->phy_model == PHY_MODEL_REALTEK_8201 &&
6075 phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
6076 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
6077 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
6078 phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
6079 phy_reserved |= PHY_REALTEK_INIT8;
6080 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
6081 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);
6083 /* restart auto negotiation */
6084 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
6085 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
6086 mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
6090 static void nv_restore_mac_addr(struct pci_dev *pci_dev)
6092 struct net_device *dev = pci_get_drvdata(pci_dev);
6093 struct fe_priv *np = netdev_priv(dev);
6094 u8 __iomem *base = get_hwbase(dev);
6096 /* special op: write back the misordered MAC address - otherwise
6097 * the next nv_probe would see a wrong address.
6099 writel(np->orig_mac[0], base + NvRegMacAddrA);
6100 writel(np->orig_mac[1], base + NvRegMacAddrB);
6101 writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
6102 base + NvRegTransmitPoll);
6105 static void nv_remove(struct pci_dev *pci_dev)
6107 struct net_device *dev = pci_get_drvdata(pci_dev);
6109 unregister_netdev(dev);
6111 nv_restore_mac_addr(pci_dev);
6113 /* restore any phy related changes */
6114 nv_restore_phy(dev);
6116 nv_mgmt_release_sema(dev);
6118 /* free all structures */
6120 iounmap(get_hwbase(dev));
6121 pci_release_regions(pci_dev);
6122 pci_disable_device(pci_dev);
6126 #ifdef CONFIG_PM_SLEEP
6127 static int nv_suspend(struct device *device)
6129 struct pci_dev *pdev = to_pci_dev(device);
6130 struct net_device *dev = pci_get_drvdata(pdev);
6131 struct fe_priv *np = netdev_priv(dev);
6132 u8 __iomem *base = get_hwbase(dev);
6135 if (netif_running(dev)) {
6139 netif_device_detach(dev);
6141 /* save non-pci configuration space */
6142 for (i = 0; i < np->register_size/sizeof(u32); i++)
6143 np->saved_config_space[i] = readl(base + i*sizeof(u32));
6148 static int nv_resume(struct device *device)
6150 struct pci_dev *pdev = to_pci_dev(device);
6151 struct net_device *dev = pci_get_drvdata(pdev);
6152 struct fe_priv *np = netdev_priv(dev);
6153 u8 __iomem *base = get_hwbase(dev);
6156 /* restore non-pci configuration space */
6157 for (i = 0; i < np->register_size/sizeof(u32); i++)
6158 writel(np->saved_config_space[i], base+i*sizeof(u32));
6160 if (np->driver_data & DEV_NEED_MSI_FIX)
6161 pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);
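/* boards with DEV_NEED_MSI_FIX apparently lose this vendor-private MSI
 * register across suspend; rewrite it before re-attaching the device
 */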
6163 /* restore phy state, including autoneg */
6166 netif_device_attach(dev);
6167 if (netif_running(dev)) {
6169 nv_set_multicast(dev);
6174 static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
6175 #define NV_PM_OPS (&nv_pm_ops)
6178 #define NV_PM_OPS NULL
6179 #endif /* CONFIG_PM_SLEEP */
6182 static void nv_shutdown(struct pci_dev *pdev)
6184 struct net_device *dev = pci_get_drvdata(pdev);
6185 struct fe_priv *np = netdev_priv(dev);
6187 if (netif_running(dev))
6191 * Restore the MAC so a kernel started by kexec won't get confused.
6192 * If we really go for poweroff, we must not restore the MAC,
6193 * otherwise the MAC for WOL will be reversed at least on some boards.
6195 if (system_state != SYSTEM_POWER_OFF)
6196 nv_restore_mac_addr(pdev);
6198 pci_disable_device(pdev);
6200 * Apparently it is not possible to reinitialise from D3 hot,
6201 * so only put the device into D3 if we really go for poweroff.
6203 if (system_state == SYSTEM_POWER_OFF) {
6204 pci_wake_from_d3(pdev, np->wolenabled);
6205 pci_set_power_state(pdev, PCI_D3hot);
6209 #define nv_shutdown NULL
6210 #endif /* CONFIG_PM */
static const struct pci_device_id pci_tbl[] = {
	{ /* nForce Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x01C3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce2 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0066),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00D6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0086),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x008C),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00E6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00DF),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* CK804 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0056),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{ /* CK804 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0057),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{ /* MCP04 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0037),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{ /* MCP04 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0038),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{ /* MCP51 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0268),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
	},
	{ /* MCP51 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0269),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
	},
	{ /* MCP55 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0372),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
	},
	{ /* MCP55 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0373),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03E5),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03E6),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03EE),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03EF),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0450),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0451),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0452),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0453),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054C),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054D),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054E),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054F),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DC),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DD),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DE),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DF),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0760),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0761),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0762),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0763),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB0),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB1),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB2),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB3),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP89 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0D7D),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
	},
	{0,},
};

static struct pci_driver forcedeth_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= pci_tbl,
	.probe		= nv_probe,
	.remove		= nv_remove,
	.shutdown	= nv_shutdown,
	.driver.pm	= NV_PM_OPS,
};

module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval at which the timer interrupt fires; the register value is computed as (time_in_micro_secs * 100) / (2^10). Min is 0 and max is 65535.");
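/*
 * Worked example for poll_interval: a register value of 970 corresponds
 * to (970 * 2^10) / 100 ~= 9933 micro-seconds between timer interrupts,
 * i.e. roughly 100 interrupts per second.
 */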
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_power_down, int, 0);
MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
module_param(debug_tx_timeout, bool, 0);
MODULE_PARM_DESC(debug_tx_timeout,
		 "Dump tx related registers and ring when tx_timeout happens");
module_pci_driver(forcedeth_pci_driver);

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pci_tbl);