/* Copyright (C) 2015 Microchip Technology
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <linux/phy.h>
33 #include <net/ip6_checksum.h>
34 #include <linux/interrupt.h>
35 #include <linux/irqdomain.h>
36 #include <linux/irq.h>
37 #include <linux/irqchip/chained_irq.h>
38 #include <linux/microchipphy.h>
39 #include <linux/phy_fixed.h>
40 #include <linux/of_mdio.h>
41 #include <linux/of_net.h>
/* Driver identification and tuning constants.
 *
 * NOTE(review): every line of this extract carries a stray leading
 * decimal (a fused original line number) and some lines are missing;
 * this pass adds documentation only and leaves the text untouched.
 */
44 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
45 #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
46 #define DRIVER_NAME "lan78xx"
/* Timeouts and RX queue memory cap (60 full-size 1518-byte frames). */
48 #define TX_TIMEOUT_JIFFIES (5 * HZ)
49 #define THROTTLE_JIFFIES (HZ / 8)
50 #define UNLINK_TIMEOUT_MS 3
52 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
/* USB bulk packet sizes per bus speed: SuperSpeed/High/Full. */
54 #define SS_USB_PKT_SIZE (1024)
55 #define HS_USB_PKT_SIZE (512)
56 #define FS_USB_PKT_SIZE (64)
/* Chip FIFO sizes and default offload/bulk-transfer settings. */
58 #define MAX_RX_FIFO_SIZE (12 * 1024)
59 #define MAX_TX_FIFO_SIZE (12 * 1024)
60 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
61 #define DEFAULT_BULK_IN_DELAY (0x0800)
62 #define MAX_SINGLE_PACKET_SIZE (9000)
63 #define DEFAULT_TX_CSUM_ENABLE (true)
64 #define DEFAULT_RX_CSUM_ENABLE (true)
65 #define DEFAULT_TSO_CSUM_ENABLE (true)
66 #define DEFAULT_VLAN_FILTER_ENABLE (true)
67 #define DEFAULT_VLAN_RX_OFFLOAD (true)
68 #define TX_OVERHEAD (8)
/* USB IDs for the three supported parts, plus ethtool EEPROM/OTP magics. */
71 #define LAN78XX_USB_VENDOR_ID (0x0424)
72 #define LAN7800_USB_PRODUCT_ID (0x7800)
73 #define LAN7850_USB_PRODUCT_ID (0x7850)
74 #define LAN7801_USB_PRODUCT_ID (0x7801)
75 #define LAN78XX_EEPROM_MAGIC (0x78A5)
76 #define LAN78XX_OTP_MAGIC (0x78F3)
/* Indicator bytes stored at offset 0 of a programmed EEPROM / OTP image. */
81 #define EEPROM_INDICATOR (0xA5)
82 #define EEPROM_MAC_OFFSET (0x01)
83 #define MAX_EEPROM_SIZE 512
84 #define OTP_INDICATOR_1 (0xF3)
85 #define OTP_INDICATOR_2 (0xF7)
/* All wake-on-LAN sources the hardware can report as supported. */
87 #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
88 WAKE_MCAST | WAKE_BCAST | \
89 WAKE_ARP | WAKE_MAGIC)
91 /* USB related defines */
92 #define BULK_IN_PIPE 1
93 #define BULK_OUT_PIPE 2
95 /* default autosuspend delay (mSec)*/
96 #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
98 /* statistic update interval (mSec) */
99 #define STAT_UPDATE_TIMER (1 * 1000)
/* Bit positions within the 32-bit status word delivered on the USB
 * interrupt endpoint.  Bits 0-11 map directly to GPIO lines; the
 * higher bits signal MAC/PHY/FIFO events.
 */
101 /* defines interrupts from interrupt EP */
102 #define MAX_INT_EP (32)
103 #define INT_EP_INTEP (31)
104 #define INT_EP_OTP_WR_DONE (28)
105 #define INT_EP_EEE_TX_LPI_START (26)
106 #define INT_EP_EEE_TX_LPI_STOP (25)
107 #define INT_EP_EEE_RX_LPI (24)
108 #define INT_EP_MAC_RESET_TIMEOUT (23)
109 #define INT_EP_RDFO (22)
110 #define INT_EP_TXE (21)
111 #define INT_EP_USB_STATUS (20)
112 #define INT_EP_TX_DIS (19)
113 #define INT_EP_RX_DIS (18)
114 #define INT_EP_PHY (17)
115 #define INT_EP_DP (16)
116 #define INT_EP_MAC_ERR (15)
117 #define INT_EP_TDFU (14)
118 #define INT_EP_TDFO (13)
119 #define INT_EP_UTX (12)
120 #define INT_EP_GPIO_11 (11)
121 #define INT_EP_GPIO_10 (10)
122 #define INT_EP_GPIO_9 (9)
123 #define INT_EP_GPIO_8 (8)
124 #define INT_EP_GPIO_7 (7)
125 #define INT_EP_GPIO_6 (6)
126 #define INT_EP_GPIO_5 (5)
127 #define INT_EP_GPIO_4 (4)
128 #define INT_EP_GPIO_3 (3)
129 #define INT_EP_GPIO_2 (2)
130 #define INT_EP_GPIO_1 (1)
131 #define INT_EP_GPIO_0 (0)
/* ethtool -S statistic names.  The order must match the field order of
 * struct lan78xx_statstage64, since lan78xx_get_stats() memcpy's that
 * struct straight into the ethtool data array.
 *
 * NOTE(review): several entries (e.g. "RX FCS Errors", "RX Pause
 * Frames") and the closing "};" appear to be missing from this
 * extract -- confirm against the complete source.
 */
133 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
135 "RX Alignment Errors",
136 "Rx Fragment Errors",
138 "RX Undersize Frame Errors",
139 "RX Oversize Frame Errors",
141 "RX Unicast Byte Count",
142 "RX Broadcast Byte Count",
143 "RX Multicast Byte Count",
145 "RX Broadcast Frames",
146 "RX Multicast Frames",
149 "RX 65 - 127 Byte Frames",
150 "RX 128 - 255 Byte Frames",
151 "RX 256 - 511 Bytes Frames",
152 "RX 512 - 1023 Byte Frames",
153 "RX 1024 - 1518 Byte Frames",
154 "RX Greater 1518 Byte Frames",
155 "EEE RX LPI Transitions",
158 "TX Excess Deferral Errors",
161 "TX Single Collisions",
162 "TX Multiple Collisions",
163 "TX Excessive Collision",
164 "TX Late Collisions",
165 "TX Unicast Byte Count",
166 "TX Broadcast Byte Count",
167 "TX Multicast Byte Count",
169 "TX Broadcast Frames",
170 "TX Multicast Frames",
173 "TX 65 - 127 Byte Frames",
174 "TX 128 - 255 Byte Frames",
175 "TX 256 - 511 Bytes Frames",
176 "TX 512 - 1023 Byte Frames",
177 "TX 1024 - 1518 Byte Frames",
178 "TX Greater 1518 Byte Frames",
179 "EEE TX LPI Transitions",
/* Raw 32-bit hardware statistics block, read in one vendor control
 * transfer by lan78xx_read_stats().  Field order matches the device's
 * statistics layout; counters wrap at 32 bits (see the rollover
 * tracking in lan78xx_check_stat_rollover()).
 *
 * NOTE(review): a few members (e.g. rx_fcs_errors, rx/tx pause frame
 * counters referenced by lan78xx_check_stat_rollover()) and the
 * closing "};" are missing from this extract.
 */
183 struct lan78xx_statstage {
185 u32 rx_alignment_errors;
186 u32 rx_fragment_errors;
187 u32 rx_jabber_errors;
188 u32 rx_undersize_frame_errors;
189 u32 rx_oversize_frame_errors;
190 u32 rx_dropped_frames;
191 u32 rx_unicast_byte_count;
192 u32 rx_broadcast_byte_count;
193 u32 rx_multicast_byte_count;
194 u32 rx_unicast_frames;
195 u32 rx_broadcast_frames;
196 u32 rx_multicast_frames;
198 u32 rx_64_byte_frames;
199 u32 rx_65_127_byte_frames;
200 u32 rx_128_255_byte_frames;
201 u32 rx_256_511_bytes_frames;
202 u32 rx_512_1023_byte_frames;
203 u32 rx_1024_1518_byte_frames;
204 u32 rx_greater_1518_byte_frames;
205 u32 eee_rx_lpi_transitions;
208 u32 tx_excess_deferral_errors;
209 u32 tx_carrier_errors;
210 u32 tx_bad_byte_count;
211 u32 tx_single_collisions;
212 u32 tx_multiple_collisions;
213 u32 tx_excessive_collision;
214 u32 tx_late_collisions;
215 u32 tx_unicast_byte_count;
216 u32 tx_broadcast_byte_count;
217 u32 tx_multicast_byte_count;
218 u32 tx_unicast_frames;
219 u32 tx_broadcast_frames;
220 u32 tx_multicast_frames;
222 u32 tx_64_byte_frames;
223 u32 tx_65_127_byte_frames;
224 u32 tx_128_255_byte_frames;
225 u32 tx_256_511_bytes_frames;
226 u32 tx_512_1023_byte_frames;
227 u32 tx_1024_1518_byte_frames;
228 u32 tx_greater_1518_byte_frames;
229 u32 eee_tx_lpi_transitions;
/* 64-bit accumulated statistics, mirroring struct lan78xx_statstage
 * field-for-field.  lan78xx_update_stats() folds each 32-bit hardware
 * counter plus its rollover count into these wide counters, and
 * lan78xx_get_stats() copies this struct out to ethtool verbatim --
 * so the field order here must match lan78xx_gstrings.
 *
 * NOTE(review): same fields elided by the extract as in the 32-bit
 * struct; closing "};" also missing.
 */
233 struct lan78xx_statstage64 {
235 u64 rx_alignment_errors;
236 u64 rx_fragment_errors;
237 u64 rx_jabber_errors;
238 u64 rx_undersize_frame_errors;
239 u64 rx_oversize_frame_errors;
240 u64 rx_dropped_frames;
241 u64 rx_unicast_byte_count;
242 u64 rx_broadcast_byte_count;
243 u64 rx_multicast_byte_count;
244 u64 rx_unicast_frames;
245 u64 rx_broadcast_frames;
246 u64 rx_multicast_frames;
248 u64 rx_64_byte_frames;
249 u64 rx_65_127_byte_frames;
250 u64 rx_128_255_byte_frames;
251 u64 rx_256_511_bytes_frames;
252 u64 rx_512_1023_byte_frames;
253 u64 rx_1024_1518_byte_frames;
254 u64 rx_greater_1518_byte_frames;
255 u64 eee_rx_lpi_transitions;
258 u64 tx_excess_deferral_errors;
259 u64 tx_carrier_errors;
260 u64 tx_bad_byte_count;
261 u64 tx_single_collisions;
262 u64 tx_multiple_collisions;
263 u64 tx_excessive_collision;
264 u64 tx_late_collisions;
265 u64 tx_unicast_byte_count;
266 u64 tx_broadcast_byte_count;
267 u64 tx_multicast_byte_count;
268 u64 tx_unicast_frames;
269 u64 tx_broadcast_frames;
270 u64 tx_multicast_frames;
272 u64 tx_64_byte_frames;
273 u64 tx_65_127_byte_frames;
274 u64 tx_128_255_byte_frames;
275 u64 tx_256_511_bytes_frames;
276 u64 tx_512_1023_byte_frames;
277 u64 tx_1024_1518_byte_frames;
278 u64 tx_greater_1518_byte_frames;
279 u64 eee_tx_lpi_transitions;
/* Register addresses dumped by the ethtool get_regs path.  The array
 * entries (and closing "};") are elided from this extract; PHY_REG_SIZE
 * presumably sizes the appended PHY register dump (32 x u32) --
 * TODO confirm against the complete source.
 */
283 static u32 lan78xx_regs[] = {
305 #define PHY_REG_SIZE (32 * sizeof(u32))
/* Per-device private filtering state, reached via dev->data[0].
 * The shadow tables here are pushed to hardware from the two deferred
 * work items (register/dataport writes need a sleepable context).
 *
 * NOTE(review): trailing members (rfe_ctl shadow, wol flags, ...) and
 * the closing "};" are missing from this extract.
 */
309 struct lan78xx_priv {
310 struct lan78xx_net *dev;
312 u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
313 u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
314 u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
315 struct mutex dataport_mutex; /* for dataport access */
316 spinlock_t rfe_ctl_lock; /* for rfe register access */
317 struct work_struct set_multicast;
318 struct work_struct set_vlan;
/* Two struct fragments.  skb_data is the per-skb bookkeeping overlaid
 * on skb->cb for queued URBs; the second fragment (holding a
 * usb_ctrlrequest) is presumably the context struct for deferred
 * control transfers -- its name and remaining members are missing
 * from this extract, TODO confirm.
 */
332 struct skb_data { /* skb->cb is one of these */
334 struct lan78xx_net *dev;
335 enum skb_state state;
341 struct usb_ctrlrequest req;
342 struct lan78xx_net *dev;
/* Bit numbers in dev->flags used with lan78xx_defer_kevent() to hand
 * work that cannot run in tasklet context off to the delayed-work
 * handler.
 */
345 #define EVENT_TX_HALT 0
346 #define EVENT_RX_HALT 1
347 #define EVENT_RX_MEMORY 2
348 #define EVENT_STS_SPLIT 3
349 #define EVENT_LINK_RESET 4
350 #define EVENT_RX_PAUSED 5
351 #define EVENT_DEV_WAKING 6
352 #define EVENT_DEV_ASLEEP 7
353 #define EVENT_DEV_OPEN 8
354 #define EVENT_STAT_UPDATE 9
/* Two more struct fragments whose opening lines are missing from the
 * extract:
 * - a statistics container (accessed elsewhere as dev->stats) holding
 *   the last raw snapshot, per-counter rollover counts/maxima and the
 *   64-bit accumulated totals, guarded by access_lock;
 * - irq_domain_data, the software IRQ domain used to expose the chip's
 *   PHY interrupt line to the PHY driver.
 */
357 struct mutex access_lock; /* for stats access */
358 struct lan78xx_statstage saved;
359 struct lan78xx_statstage rollover_count;
360 struct lan78xx_statstage rollover_max;
361 struct lan78xx_statstage64 curr_stat;
364 struct irq_domain_data {
365 struct irq_domain *irqdomain;
367 struct irq_chip *irqchip;
368 irq_flow_handler_t irq_handler;
370 struct mutex irq_lock; /* for irq bus access */
/* Interior of the main per-device struct (lan78xx_net); the struct
 * header and many members are missing from this extract.  Visible
 * groups: core net/USB handles, skb queues for the rx/tx pipelines,
 * the bottom-half tasklet and deferred-work queue, endpoint/pipe
 * bookkeeping, timers, the software MDIO bus and stats/irq state.
 *
 * NOTE(review): "struct statstage stats;" does not match any struct
 * tag visible in this extract (the stats container's tag line is
 * missing) -- verify the tag against the complete source.
 */
374 struct net_device *net;
375 struct usb_device *udev;
376 struct usb_interface *intf;
381 struct sk_buff_head rxq;
382 struct sk_buff_head txq;
383 struct sk_buff_head done;
384 struct sk_buff_head rxq_pause;
385 struct sk_buff_head txq_pend;
387 struct tasklet_struct bh;
388 struct delayed_work wq;
390 struct usb_host_endpoint *ep_blkin;
391 struct usb_host_endpoint *ep_blkout;
392 struct usb_host_endpoint *ep_intr;
396 struct urb *urb_intr;
397 struct usb_anchor deferred;
399 struct mutex phy_mutex; /* for phy access */
400 unsigned pipe_in, pipe_out, pipe_intr;
402 u32 hard_mtu; /* count any extra framing */
403 size_t rx_urb_size; /* size for rx urbs */
407 wait_queue_head_t *wait;
408 unsigned char suspend_count;
411 struct timer_list delay;
412 struct timer_list stat_monitor;
/* dev->data[0] holds the struct lan78xx_priv pointer (see casts below). */
414 unsigned long data[5];
421 struct mii_bus *mdiobus;
422 phy_interface_t interface;
425 u8 fc_request_control;
428 struct statstage stats;
430 struct irq_domain_data domain_data;
/* External PHY IDs the LAN7801 may be wired to, and the standard
 * overridable message-level module parameter (-1 = use default).
 */
433 /* define external phy id */
434 #define PHY_LAN8835 (0x0007C130)
435 #define PHY_KSZ9031RNX (0x00221620)
437 /* use ethtool to change the level for any given device */
438 static int msg_level = -1;
439 module_param(msg_level, int, 0);
440 MODULE_PARM_DESC(msg_level, "Override default message level");
/* Read one 32-bit device register via a vendor control-IN transfer.
 * Returns >= 0 on success with *data filled in, negative errno on
 * failure.
 *
 * NOTE(review): this extract is missing several lines (the kmalloc
 * NULL check, the le32 fixup into *data, kfree(), return and closing
 * brace) -- confirm against the complete source.
 */
442 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
/* Heap bounce buffer: USB control transfers must not use stack memory. */
444 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
450 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
451 USB_VENDOR_REQUEST_READ_REGISTER,
452 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
453 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
454 if (likely(ret >= 0)) {
458 netdev_warn(dev->net,
459 "Failed to read register index 0x%08x. ret = %d",
/* Write one 32-bit device register via a vendor control-OUT transfer.
 * Returns >= 0 on success, negative errno on failure.
 *
 * NOTE(review): lines missing from this extract (kmalloc NULL check,
 * cpu_to_le32 store into *buf, kfree(), return, closing brace) --
 * confirm against the complete source.
 */
468 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
470 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
479 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
480 USB_VENDOR_REQUEST_WRITE_REGISTER,
481 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
482 0, index, buf, 4, USB_CTRL_SET_TIMEOUT),
/* Fetch the full hardware statistics block in a single vendor
 * control-IN transfer into a heap buffer, byte-swap each u32 counter
 * in place, and (per the visible success branch) copy it out to
 * @data.  Returns the usb_control_msg() result.
 *
 * NOTE(review): the extract omits the kmalloc NULL check, the length/
 * buffer arguments of the control message, the memcpy to @data, the
 * kfree() and return -- confirm against the complete source.  Also
 * note the IN transfer uses USB_CTRL_SET_TIMEOUT; presumably
 * harmless (both timeouts are equal in mainline) but worth checking.
 */
494 static int lan78xx_read_stats(struct lan78xx_net *dev,
495 struct lan78xx_statstage *data)
499 struct lan78xx_statstage *stats;
503 stats = kmalloc(sizeof(*stats), GFP_KERNEL);
507 ret = usb_control_msg(dev->udev,
508 usb_rcvctrlpipe(dev->udev, 0),
509 USB_VENDOR_REQUEST_GET_STATS,
510 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
515 USB_CTRL_SET_TIMEOUT);
516 if (likely(ret >= 0)) {
/* Counters arrive little-endian; fix up each u32 in place. */
519 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
520 le32_to_cpus(&src[i]);
524 netdev_warn(dev->net,
525 "Failed to read stat ret = 0x%x", ret);
/* Bump the per-counter rollover count when the freshly-read 32-bit
 * value is below the previously saved snapshot (i.e. the hardware
 * counter wrapped).  Statement-macro; the closing "}" line of the
 * macro body is missing from this extract.
 */
533 #define check_counter_rollover(struct1, dev_stats, member) { \
534 if (struct1->member < dev_stats.saved.member) \
535 dev_stats.rollover_count.member++; \
/* Compare every counter in the fresh hardware snapshot @stats against
 * the previously saved snapshot, counting 32-bit wraparounds, then
 * save @stats as the new baseline.  Called with stats.access_lock
 * held by lan78xx_update_stats().
 */
538 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
539 struct lan78xx_statstage *stats)
541 check_counter_rollover(stats, dev->stats, rx_fcs_errors);
542 check_counter_rollover(stats, dev->stats, rx_alignment_errors);
543 check_counter_rollover(stats, dev->stats, rx_fragment_errors);
544 check_counter_rollover(stats, dev->stats, rx_jabber_errors);
545 check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
546 check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
547 check_counter_rollover(stats, dev->stats, rx_dropped_frames);
548 check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
549 check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
550 check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
551 check_counter_rollover(stats, dev->stats, rx_unicast_frames);
552 check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
553 check_counter_rollover(stats, dev->stats, rx_multicast_frames);
554 check_counter_rollover(stats, dev->stats, rx_pause_frames);
555 check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
556 check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
557 check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
558 check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
559 check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
560 check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
561 check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
562 check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
563 check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
564 check_counter_rollover(stats, dev->stats, tx_fcs_errors);
565 check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
566 check_counter_rollover(stats, dev->stats, tx_carrier_errors);
567 check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
568 check_counter_rollover(stats, dev->stats, tx_single_collisions);
569 check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
570 check_counter_rollover(stats, dev->stats, tx_excessive_collision);
571 check_counter_rollover(stats, dev->stats, tx_late_collisions);
572 check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
573 check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
574 check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
575 check_counter_rollover(stats, dev->stats, tx_unicast_frames);
576 check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
577 check_counter_rollover(stats, dev->stats, tx_multicast_frames);
578 check_counter_rollover(stats, dev->stats, tx_pause_frames);
579 check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
580 check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
581 check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
582 check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
583 check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
584 check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
585 check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
586 check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
587 check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
/* New snapshot becomes the baseline for the next rollover check. */
589 memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
/* Refresh the 64-bit accumulated statistics: read a fresh hardware
 * snapshot, update rollover counts, then rebuild each wide counter as
 * raw + rollovers * (max + 1).  The u32/u64 pointer walk relies on the
 * statstage and statstage64 structs having identical field order.
 * Holds stats.access_lock around the read-modify-write; takes a
 * runtime-PM reference for the USB traffic and bails out silently if
 * the interface cannot be resumed.
 */
592 static void lan78xx_update_stats(struct lan78xx_net *dev)
594 u32 *p, *count, *max;
597 struct lan78xx_statstage lan78xx_stats;
599 if (usb_autopm_get_interface(dev->intf) < 0)
602 p = (u32 *)&lan78xx_stats;
603 count = (u32 *)&dev->stats.rollover_count;
604 max = (u32 *)&dev->stats.rollover_max;
605 data = (u64 *)&dev->stats.curr_stat;
607 mutex_lock(&dev->stats.access_lock);
609 if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
610 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
/* curr = raw counter + (rollovers * counter range). */
612 for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
613 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
615 mutex_unlock(&dev->stats.access_lock);
617 usb_autopm_put_interface(dev->intf);
/* Poll MII_ACC until the MDIO BUSY bit clears, giving up after ~1s.
 * Must be called with phy_mutex held (per the original comment).
 *
 * NOTE(review): the loop's "do {", the success/timeout returns and
 * the closing brace are missing from this extract.
 */
620 /* Loop until the read is completed with timeout called with phy_mutex held */
621 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
623 unsigned long start_time = jiffies;
628 ret = lan78xx_read_reg(dev, MII_ACC, &val);
629 if (unlikely(ret < 0))
632 if (!(val & MII_ACC_MII_BUSY_))
634 } while (!time_after(jiffies, start_time + HZ));
639 static inline u32 mii_access(int id, int index, int read)
643 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
644 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
646 ret |= MII_ACC_MII_READ_;
648 ret |= MII_ACC_MII_WRITE_;
649 ret |= MII_ACC_MII_BUSY_;
/* Poll E2P_CMD until the EEPROM controller reports not-busy or a
 * hardware timeout, bounded by ~1s of wall time.  Warns and (per the
 * visible tail) fails if either busy or the timeout bit is still set.
 *
 * NOTE(review): the "do {" line, return statements and closing brace
 * are missing from this extract.
 */
654 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
656 unsigned long start_time = jiffies;
661 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
662 if (unlikely(ret < 0))
665 if (!(val & E2P_CMD_EPC_BUSY_) ||
666 (val & E2P_CMD_EPC_TIMEOUT_))
668 usleep_range(40, 100);
669 } while (!time_after(jiffies, start_time + HZ));
671 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
672 netdev_warn(dev->net, "EEPROM read operation timeout");
/* Confirm the EEPROM controller is idle before starting a new
 * command; polls E2P_CMD busy for up to ~1s and warns on failure.
 *
 * NOTE(review): "do {", the success return and the final error return
 * are missing from this extract.
 */
679 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
681 unsigned long start_time = jiffies;
686 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
687 if (unlikely(ret < 0))
690 if (!(val & E2P_CMD_EPC_BUSY_))
693 usleep_range(40, 100);
694 } while (!time_after(jiffies, start_time + HZ));
696 netdev_warn(dev->net, "EEPROM is busy");
/* Read @length bytes from the configuration EEPROM starting at
 * @offset, one byte per E2P READ command.  On LAN7800 the EEPROM pins
 * are muxed with the LEDs, so LED0/LED1 are disabled in HW_CFG for
 * the duration and the saved value restored on exit.
 *
 * NOTE(review): several lines are missing from this extract (local
 * declarations, the "saved = val" capture, offset increment, error
 * gotos and the final return) -- confirm against the complete source.
 */
700 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
701 u32 length, u8 *data)
708 /* depends on chip, some EEPROM pins are muxed with LED function.
709 * disable & restore LED function to access EEPROM.
711 ret = lan78xx_read_reg(dev, HW_CFG, &val);
713 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
714 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
715 ret = lan78xx_write_reg(dev, HW_CFG, val);
718 retval = lan78xx_eeprom_confirm_not_busy(dev);
722 for (i = 0; i < length; i++) {
723 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
724 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
725 ret = lan78xx_write_reg(dev, E2P_CMD, val);
726 if (unlikely(ret < 0)) {
731 retval = lan78xx_wait_eeprom(dev);
735 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
736 if (unlikely(ret < 0)) {
/* EEPROM data register returns one byte in the low 8 bits. */
741 data[i] = val & 0xFF;
/* Restore the original HW_CFG (re-enables LED function on LAN7800). */
747 if (dev->chipid == ID_REV_CHIP_ID_7800_)
748 ret = lan78xx_write_reg(dev, HW_CFG, saved);
/* Read from the EEPROM only if it is actually present: byte 0 must
 * hold the EEPROM_INDICATOR signature before the real read at @offset
 * is attempted.
 *
 * NOTE(review): the else/error path and final return are missing from
 * this extract.
 */
753 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
754 u32 length, u8 *data)
759 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
760 if ((ret == 0) && (sig == EEPROM_INDICATOR))
761 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
/* Write @length bytes to the configuration EEPROM at @offset: issue a
 * write-enable (EWEN) command, then per byte fill E2P_DATA and issue a
 * WRITE command, waiting for completion each time.  Mirrors the LED
 * mux save/disable/restore dance of lan78xx_read_raw_eeprom() on
 * LAN7800.
 *
 * NOTE(review): missing from this extract: declarations, the
 * "saved = val" capture, the data-byte store into val, offset
 * increment, error gotos and final return -- confirm against the
 * complete source.
 */
768 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
769 u32 length, u8 *data)
776 /* depends on chip, some EEPROM pins are muxed with LED function.
777 * disable & restore LED function to access EEPROM.
779 ret = lan78xx_read_reg(dev, HW_CFG, &val);
781 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
782 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
783 ret = lan78xx_write_reg(dev, HW_CFG, val);
786 retval = lan78xx_eeprom_confirm_not_busy(dev);
790 /* Issue write/erase enable command */
791 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
792 ret = lan78xx_write_reg(dev, E2P_CMD, val);
793 if (unlikely(ret < 0)) {
798 retval = lan78xx_wait_eeprom(dev);
802 for (i = 0; i < length; i++) {
803 /* Fill data register */
805 ret = lan78xx_write_reg(dev, E2P_DATA, val);
811 /* Send "write" command */
812 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
813 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
814 ret = lan78xx_write_reg(dev, E2P_CMD, val);
820 retval = lan78xx_wait_eeprom(dev);
/* Restore saved HW_CFG (re-enables LEDs on LAN7800). */
829 if (dev->chipid == ID_REV_CHIP_ID_7800_)
830 ret = lan78xx_write_reg(dev, HW_CFG, saved);
/* Read @length bytes from on-chip OTP memory at @offset.  If the OTP
 * block is powered down (OTP_PWR_DN_PWRDN_N_ set), power it up first
 * and poll until the bit clears.  Each byte read programs the split
 * address registers, issues a READ command via OTP_CMD_GO and polls
 * OTP_STATUS until not busy.
 *
 * NOTE(review): error returns inside the timeout branches and the
 * final return are missing from this extract; the many unchecked
 * "ret =" register accesses match the upstream style but verify
 * against the complete source.
 */
835 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
836 u32 length, u8 *data)
841 unsigned long timeout;
843 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
845 if (buf & OTP_PWR_DN_PWRDN_N_) {
846 /* clear it and wait to be cleared */
847 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
849 timeout = jiffies + HZ;
852 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
853 if (time_after(jiffies, timeout)) {
854 netdev_warn(dev->net,
855 "timeout on OTP_PWR_DN");
858 } while (buf & OTP_PWR_DN_PWRDN_N_);
861 for (i = 0; i < length; i++) {
/* OTP address is split: high bits 15:11 in ADDR1, bits 10:3 in ADDR2. */
862 ret = lan78xx_write_reg(dev, OTP_ADDR1,
863 ((offset + i) >> 8) & OTP_ADDR1_15_11);
864 ret = lan78xx_write_reg(dev, OTP_ADDR2,
865 ((offset + i) & OTP_ADDR2_10_3));
867 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
868 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
870 timeout = jiffies + HZ;
873 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
874 if (time_after(jiffies, timeout)) {
875 netdev_warn(dev->net,
876 "timeout on OTP_STATUS");
879 } while (buf & OTP_STATUS_BUSY_);
881 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
883 data[i] = (u8)(buf & 0xFF);
/* Program @length bytes into OTP memory at @offset.  Same power-up
 * handshake as the raw read; then switches the controller to BYTE
 * program mode and, per byte, loads the split address registers and
 * program data, issues a program-and-verify test command, kicks
 * OTP_CMD_GO and polls OTP_STATUS until idle.
 *
 * NOTE(review): the timeout-branch returns and the final return are
 * missing from this extract -- confirm against the complete source.
 */
889 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
890 u32 length, u8 *data)
895 unsigned long timeout;
897 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
899 if (buf & OTP_PWR_DN_PWRDN_N_) {
900 /* clear it and wait to be cleared */
901 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
903 timeout = jiffies + HZ;
906 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
907 if (time_after(jiffies, timeout)) {
908 netdev_warn(dev->net,
909 "timeout on OTP_PWR_DN completion");
912 } while (buf & OTP_PWR_DN_PWRDN_N_);
915 /* set to BYTE program mode */
916 ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
918 for (i = 0; i < length; i++) {
919 ret = lan78xx_write_reg(dev, OTP_ADDR1,
920 ((offset + i) >> 8) & OTP_ADDR1_15_11);
921 ret = lan78xx_write_reg(dev, OTP_ADDR2,
922 ((offset + i) & OTP_ADDR2_10_3));
923 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
924 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
925 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
927 timeout = jiffies + HZ;
930 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
931 if (time_after(jiffies, timeout)) {
932 netdev_warn(dev->net,
933 "Timeout on OTP_STATUS completion");
936 } while (buf & OTP_STATUS_BUSY_);
/* Read OTP after validating the indicator byte at offset 0.  The two
 * indicator values select different image banks -- the extract hides
 * the per-indicator offset adjustment and the "no image" error path,
 * so only the raw read of the adjusted range is visible here.
 */
942 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
943 u32 length, u8 *data)
948 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
951 if (sig == OTP_INDICATOR_1)
953 else if (sig == OTP_INDICATOR_2)
958 ret = lan78xx_read_raw_otp(dev, offset, length, data);
/* Poll DP_SEL up to 100 times for the dataport-ready bit before a
 * dataport RAM access; warns and (per the visible tail) fails on
 * timeout.
 *
 * NOTE(review): the success/error return statements and closing brace
 * are missing from this extract.  DP_SEL_DPRDY_ set appears to mean
 * "ready" here -- TODO confirm polarity against the data sheet.
 */
964 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
968 for (i = 0; i < 100; i++) {
971 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
972 if (unlikely(ret < 0))
975 if (dp_sel & DP_SEL_DPRDY_)
978 usleep_range(40, 100);
981 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
/* Write @length words from @buf into the selected internal RAM bank
 * (@ram_select, e.g. the VLAN/DA hash filter) starting at @addr,
 * via the DP_ADDR/DP_DATA/DP_CMD dataport interface.  Serialized by
 * pdata->dataport_mutex and wrapped in a runtime-PM reference; bails
 * out silently if the interface cannot be resumed.
 *
 * NOTE(review): error gotos after the busy-waits and the final return
 * are missing from this extract.
 */
986 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
987 u32 addr, u32 length, u32 *buf)
989 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
993 if (usb_autopm_get_interface(dev->intf) < 0)
996 mutex_lock(&pdata->dataport_mutex);
998 ret = lan78xx_dataport_wait_not_busy(dev);
/* Select the target RAM bank, preserving the other DP_SEL bits. */
1002 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1004 dp_sel &= ~DP_SEL_RSEL_MASK_;
1005 dp_sel |= ram_select;
1006 ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
1008 for (i = 0; i < length; i++) {
1009 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
1011 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
1013 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1015 ret = lan78xx_dataport_wait_not_busy(dev);
1021 mutex_unlock(&pdata->dataport_mutex);
1022 usb_autopm_put_interface(dev->intf);
/* Pack a MAC address into perfect-filter slot @index of the shadow
 * table (slot 0 is reserved for the device's own address, hence
 * index > 0).  pfilter_table[index][1] holds addr bytes 3..0 and [0]
 * holds the high half plus the VALID and dst-match type flags.
 *
 * NOTE(review): the first byte of each packed half (the initial
 * "temp = addr[3]" / "temp = addr[5]" lines) is missing from this
 * extract.
 */
1027 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1028 int index, u8 addr[ETH_ALEN])
1032 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1034 temp = addr[2] | (temp << 8);
1035 temp = addr[1] | (temp << 8);
1036 temp = addr[0] | (temp << 8);
1037 pdata->pfilter_table[index][1] = temp;
1039 temp = addr[4] | (temp << 8);
1040 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1041 pdata->pfilter_table[index][0] = temp;
1045 /* returns hash bit number for given MAC address */
1046 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1048 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
/* Work handler for pdata->set_multicast: push the shadow multicast
 * hash table (via the dataport), the perfect-filter table (clearing
 * MAF_HI first so a half-written entry is never valid) and the
 * cached RFE_CTL value to hardware.  Runs in process context because
 * register/dataport writes sleep.
 */
1051 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1053 struct lan78xx_priv *pdata =
1054 container_of(param, struct lan78xx_priv, set_multicast);
1055 struct lan78xx_net *dev = pdata->dev;
1059 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1062 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1063 DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1065 for (i = 1; i < NUM_OF_MAF; i++) {
/* Invalidate the slot before rewriting LO then HI (HI carries VALID). */
1066 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1067 ret = lan78xx_write_reg(dev, MAF_LO(i),
1068 pdata->pfilter_table[i][1]);
1069 ret = lan78xx_write_reg(dev, MAF_HI(i),
1070 pdata->pfilter_table[i][0]);
1073 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
/* ndo_set_rx_mode handler.  Rebuilds the shadow RFE_CTL flags, the
 * multicast hash table and the perfect-filter table under
 * rfe_ctl_lock according to IFF_PROMISC / IFF_ALLMULTI and the
 * device's multicast list, then defers the actual (sleeping) register
 * writes to the set_multicast work item.  First 32 multicast
 * addresses go into perfect-filter slots; overflow falls back to the
 * hash filter.
 *
 * NOTE(review): the slot-counter increment / "i < 33" branch around
 * the perfect-vs-hash choice is missing from this extract.
 */
1076 static void lan78xx_set_multicast(struct net_device *netdev)
1078 struct lan78xx_net *dev = netdev_priv(netdev);
1079 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1080 unsigned long flags;
1083 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1085 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1086 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1088 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1089 pdata->mchash_table[i] = 0;
1090 /* pfilter_table[0] has own HW address */
1091 for (i = 1; i < NUM_OF_MAF; i++) {
1092 pdata->pfilter_table[i][0] =
1093 pdata->pfilter_table[i][1] = 0;
/* Broadcast reception is always on. */
1096 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1098 if (dev->net->flags & IFF_PROMISC) {
1099 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1100 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1102 if (dev->net->flags & IFF_ALLMULTI) {
1103 netif_dbg(dev, drv, dev->net,
1104 "receive all multicast enabled");
1105 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1109 if (netdev_mc_count(dev->net)) {
1110 struct netdev_hw_addr *ha;
1113 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1115 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1118 netdev_for_each_mc_addr(ha, netdev) {
1119 /* set first 32 into Perfect Filter */
1121 lan78xx_set_addr_filter(pdata, i, ha->addr);
1123 u32 bitnum = lan78xx_hash(ha->addr);
1125 pdata->mchash_table[bitnum / 32] |=
1126 (1 << (bitnum % 32));
1127 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1133 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1135 /* defer register writes to a sleepable context */
1136 schedule_work(&pdata->set_multicast);
/* Program MAC flow control after link-up.  Resolves the pause
 * capabilities either from autoneg (lcladv/rmtadv) or from the
 * user-requested fc_request_control, then writes the FCT_FLOW
 * threshold register before enabling pause in FLOW (order required
 * per the inline comment).  The per-speed fct_flow threshold values
 * are missing from this extract, as is the final return.
 */
1139 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1140 u16 lcladv, u16 rmtadv)
1142 u32 flow = 0, fct_flow = 0;
1146 if (dev->fc_autoneg)
1147 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1149 cap = dev->fc_request_control;
/* TX pause enable carries the pause-time quanta in the low 16 bits. */
1151 if (cap & FLOW_CTRL_TX)
1152 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1154 if (cap & FLOW_CTRL_RX)
1155 flow |= FLOW_CR_RX_FCEN_;
1157 if (dev->udev->speed == USB_SPEED_SUPER)
1159 else if (dev->udev->speed == USB_SPEED_HIGH)
1162 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1163 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1164 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1166 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1168 /* threshold value should be set before enabling flow */
1169 ret = lan78xx_write_reg(dev, FLOW, flow);
/* Handle a PHY interrupt (EVENT_LINK_RESET): ack the interrupt,
 * re-read PHY state and react to link transitions.  On link-down the
 * MAC is reset (the MAC_CR reset-bit manipulation between the
 * read and write is missing from this extract) and the statistics
 * timer stopped.  On link-up, USB LPM (U1/U2) is configured per link
 * speed on SuperSpeed hosts, flow control is re-resolved from the
 * advertisement registers, and the statistics timer is restarted.
 *
 * NOTE(review): error-handling after the phy_read() calls, the
 * tx_bh/netif_carrier handling and the final return are missing from
 * this extract.
 */
1174 static int lan78xx_link_reset(struct lan78xx_net *dev)
1176 struct phy_device *phydev = dev->net->phydev;
1177 struct ethtool_link_ksettings ecmd;
1178 int ladv, radv, ret;
1181 /* clear LAN78xx interrupt status */
1182 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1183 if (unlikely(ret < 0))
1186 phy_read_status(phydev);
1188 if (!phydev->link && dev->link_on) {
1189 dev->link_on = false;
1192 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1193 if (unlikely(ret < 0))
1196 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1197 if (unlikely(ret < 0))
1200 del_timer(&dev->stat_monitor);
1201 } else if (phydev->link && !dev->link_on) {
1202 dev->link_on = true;
1204 phy_ethtool_ksettings_get(phydev, &ecmd);
1206 if (dev->udev->speed == USB_SPEED_SUPER) {
1207 if (ecmd.base.speed == 1000) {
/* At gigabit: disable U2, enable only U1 (U2 exit latency too high). */
1209 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1210 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1211 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1213 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1214 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1215 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1217 /* enable U1 & U2 */
1218 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1219 buf |= USB_CFG1_DEV_U2_INIT_EN_;
1220 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1221 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1225 ladv = phy_read(phydev, MII_ADVERTISE);
1229 radv = phy_read(phydev, MII_LPA);
1233 netif_dbg(dev, link, dev->net,
1234 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1235 ecmd.base.speed, ecmd.base.duplex, ladv, radv);
1237 ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1240 if (!timer_pending(&dev->stat_monitor)) {
1242 mod_timer(&dev->stat_monitor,
1243 jiffies + STAT_UPDATE_TIMER);
/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. hope the failure is rare.
 */
1255 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1257 set_bit(work, &dev->flags);
1258 if (!schedule_delayed_work(&dev->wq, 0))
1259 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
/* Interrupt-endpoint completion handler payload parser.  Expects a
 * 4-byte little-endian status word; on a PHY interrupt it defers a
 * link reset to keventd and forwards the interrupt into the software
 * IRQ domain so the attached PHY driver sees it.
 *
 * NOTE(review): the early return after the length warning and the
 * else around the final "unexpected interrupt" warning are missing
 * from this extract.
 */
1262 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1266 if (urb->actual_length != 4) {
1267 netdev_warn(dev->net,
1268 "unexpected urb length %d", urb->actual_length);
1272 memcpy(&intdata, urb->transfer_buffer, 4);
1273 le32_to_cpus(&intdata);
1275 if (intdata & INT_ENP_PHY_INT) {
1276 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1277 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1279 if (dev->domain_data.phyirq > 0)
1280 generic_handle_irq(dev->domain_data.phyirq);
1282 netdev_warn(dev->net,
1283 "unexpected interrupt: 0x%08x\n", intdata);
1286 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1288 return MAX_EEPROM_SIZE;
/* ethtool get_eeprom: read @ee->len raw EEPROM bytes at @ee->offset
 * under a runtime-PM reference and report the driver's EEPROM magic.
 *
 * NOTE(review): the early return on autopm failure and the final
 * return are missing from this extract.
 */
1291 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1292 struct ethtool_eeprom *ee, u8 *data)
1294 struct lan78xx_net *dev = netdev_priv(netdev);
1297 ret = usb_autopm_get_interface(dev->intf);
1301 ee->magic = LAN78XX_EEPROM_MAGIC;
1303 ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1305 usb_autopm_put_interface(dev->intf);
/* ethtool set_eeprom: route the write to EEPROM or OTP based on the
 * magic supplied by userspace.  OTP programming is additionally gated
 * on writing a full image from offset 0 whose first byte is the OTP
 * indicator, since OTP bits cannot be erased.
 *
 * NOTE(review): the autopm-failure return, the invalid-magic error
 * path and the final return are missing from this extract.
 */
1310 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1311 struct ethtool_eeprom *ee, u8 *data)
1313 struct lan78xx_net *dev = netdev_priv(netdev);
1316 ret = usb_autopm_get_interface(dev->intf);
1320 /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1321 * to load data from EEPROM
1323 if (ee->magic == LAN78XX_EEPROM_MAGIC)
1324 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1325 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1326 (ee->offset == 0) &&
1328 (data[0] == OTP_INDICATOR_1))
1329 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1331 usb_autopm_put_interface(dev->intf);
/* ethtool get_strings: copy the statistic names for ETH_SS_STATS.
 *
 * NOTE(review): the signature's continuation line (the "u8 *data"
 * parameter) and the function braces are missing from this extract.
 */
1336 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1339 if (stringset == ETH_SS_STATS)
1340 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1343 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1345 if (sset == ETH_SS_STATS)
1346 return ARRAY_SIZE(lan78xx_gstrings);
/* ethtool get_ethtool_stats: refresh the 64-bit accumulated counters
 * from hardware, then copy the whole curr_stat struct out under
 * access_lock.  Relies on curr_stat's field order matching
 * lan78xx_gstrings.
 */
1351 static void lan78xx_get_stats(struct net_device *netdev,
1352 struct ethtool_stats *stats, u64 *data)
1354 struct lan78xx_net *dev = netdev_priv(netdev);
1356 lan78xx_update_stats(dev);
1358 mutex_lock(&dev->stats.access_lock);
1359 memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1360 mutex_unlock(&dev->stats.access_lock);
/* ethtool get_wol: report wake-on-LAN support/state.  WoL is only
 * advertised when the device signals remote-wakeup capability in
 * USB_CFG0; otherwise (per the missing else branch) nothing is
 * supported.  Takes a runtime-PM reference for the register read.
 *
 * NOTE(review): the else branch, any goto/label around the error
 * path, and the closing brace are missing from this extract.
 */
1363 static void lan78xx_get_wol(struct net_device *netdev,
1364 struct ethtool_wolinfo *wol)
1366 struct lan78xx_net *dev = netdev_priv(netdev);
1369 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1371 if (usb_autopm_get_interface(dev->intf) < 0)
1374 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1375 if (unlikely(ret < 0)) {
1379 if (buf & USB_CFG_RMT_WKP_) {
1380 wol->supported = WAKE_ALL;
1381 wol->wolopts = pdata->wol;
1388 usb_autopm_put_interface(dev->intf);
/* ethtool .set_wol: record requested wake options, arm the USB device for
 * wakeup, and forward the request to the PHY.
 * NOTE(review): pdata->wol is only OR-ed with new bits here — confirm a
 * "pdata->wol = 0" reset precedes this sequence in the full source.
 */
1391 static int lan78xx_set_wol(struct net_device *netdev,
1392 struct ethtool_wolinfo *wol)
1394 struct lan78xx_net *dev = netdev_priv(netdev);
1395 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1398 ret = usb_autopm_get_interface(dev->intf);
1403 if (wol->wolopts & WAKE_UCAST)
1404 pdata->wol |= WAKE_UCAST;
1405 if (wol->wolopts & WAKE_MCAST)
1406 pdata->wol |= WAKE_MCAST;
1407 if (wol->wolopts & WAKE_BCAST)
1408 pdata->wol |= WAKE_BCAST;
1409 if (wol->wolopts & WAKE_MAGIC)
1410 pdata->wol |= WAKE_MAGIC;
1411 if (wol->wolopts & WAKE_PHY)
1412 pdata->wol |= WAKE_PHY;
1413 if (wol->wolopts & WAKE_ARP)
1414 pdata->wol |= WAKE_ARP;
/* any non-zero option set enables system wakeup via this USB device */
1416 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1418 phy_ethtool_set_wol(netdev->phydev, wol);
1420 usb_autopm_put_interface(dev->intf);
/* ethtool .get_eee: report Energy-Efficient-Ethernet state.
 * EEE is "enabled" when MAC_CR_EEE_EN_ is set in MAC_CR; "active" when the
 * local advertisement overlaps the link partner's.
 */
1425 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1427 struct lan78xx_net *dev = netdev_priv(net);
1428 struct phy_device *phydev = net->phydev;
1432 ret = usb_autopm_get_interface(dev->intf);
1436 ret = phy_ethtool_get_eee(phydev, edata);
1440 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1441 if (buf & MAC_CR_EEE_EN_) {
1442 edata->eee_enabled = true;
1443 edata->eee_active = !!(edata->advertised &
1444 edata->lp_advertised);
1445 edata->tx_lpi_enabled = true;
1446 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1447 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1448 edata->tx_lpi_timer = buf;
1450 edata->eee_enabled = false;
1451 edata->eee_active = false;
1452 edata->tx_lpi_enabled = false;
1453 edata->tx_lpi_timer = 0;
1458 usb_autopm_put_interface(dev->intf);
/* ethtool .set_eee: toggle MAC_CR_EEE_EN_ and, when enabling, push the
 * advertisement to the PHY and program the TX LPI request delay (uSec).
 * NOTE(review): intermediate read/write return codes overwrite "ret"
 * without checks — the final status appears to be what is returned.
 */
1463 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1465 struct lan78xx_net *dev = netdev_priv(net);
1469 ret = usb_autopm_get_interface(dev->intf);
1473 if (edata->eee_enabled) {
1474 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1475 buf |= MAC_CR_EEE_EN_;
1476 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1478 phy_ethtool_set_eee(net->phydev, edata);
1480 buf = (u32)edata->tx_lpi_timer;
1481 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1483 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1484 buf &= ~MAC_CR_EEE_EN_;
1485 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1488 usb_autopm_put_interface(dev->intf);
1493 static u32 lan78xx_get_link(struct net_device *net)
1495 phy_read_status(net->phydev);
1497 return net->phydev->link;
1500 static void lan78xx_get_drvinfo(struct net_device *net,
1501 struct ethtool_drvinfo *info)
1503 struct lan78xx_net *dev = netdev_priv(net);
1505 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1506 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
/* ethtool .get_msglevel: return the driver's netif message-enable mask. */
1509 static u32 lan78xx_get_msglevel(struct net_device *net)
1511 struct lan78xx_net *dev = netdev_priv(net);
1513 return dev->msg_enable;
/* ethtool .set_msglevel: set the driver's netif message-enable mask. */
1516 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1518 struct lan78xx_net *dev = netdev_priv(net);
1520 dev->msg_enable = level;
/* ethtool .get_link_ksettings: fetch link settings from the PHY, holding a
 * USB autopm reference while the PHY (via MDIO-over-USB) is queried.
 */
1523 static int lan78xx_get_link_ksettings(struct net_device *net,
1524 struct ethtool_link_ksettings *cmd)
1526 struct lan78xx_net *dev = netdev_priv(net);
1527 struct phy_device *phydev = net->phydev;
1530 ret = usb_autopm_get_interface(dev->intf);
1534 phy_ethtool_ksettings_get(phydev, cmd);
1536 usb_autopm_put_interface(dev->intf);
/* ethtool .set_link_ksettings: apply speed/duplex/autoneg via phylib.
 * For forced (non-autoneg) modes the link is bounced by momentarily setting
 * BMCR_LOOPBACK, then restoring BMCR, so the new mode takes effect.
 */
1541 static int lan78xx_set_link_ksettings(struct net_device *net,
1542 const struct ethtool_link_ksettings *cmd)
1544 struct lan78xx_net *dev = netdev_priv(net);
1545 struct phy_device *phydev = net->phydev;
1549 ret = usb_autopm_get_interface(dev->intf);
1553 /* change speed & duplex */
1554 ret = phy_ethtool_ksettings_set(phydev, cmd);
1556 if (!cmd->base.autoneg) {
1557 /* force link down */
1558 temp = phy_read(phydev, MII_BMCR);
1559 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
/* restore BMCR to bring the link back up in the forced mode */
1561 phy_write(phydev, MII_BMCR, temp);
1564 usb_autopm_put_interface(dev->intf);
/* ethtool .get_pauseparam: report flow-control autoneg and the driver's
 * requested RX/TX pause settings (fc_request_control).
 */
1569 static void lan78xx_get_pause(struct net_device *net,
1570 struct ethtool_pauseparam *pause)
1572 struct lan78xx_net *dev = netdev_priv(net);
1573 struct phy_device *phydev = net->phydev;
1574 struct ethtool_link_ksettings ecmd;
1576 phy_ethtool_ksettings_get(phydev, &ecmd);
1578 pause->autoneg = dev->fc_autoneg;
1580 if (dev->fc_request_control & FLOW_CTRL_TX)
1581 pause->tx_pause = 1;
1583 if (dev->fc_request_control & FLOW_CTRL_RX)
1584 pause->rx_pause = 1;
/* ethtool .set_pauseparam: record the requested RX/TX pause configuration.
 * Pause autoneg is rejected unless link autoneg is on; with autoneg the
 * pause bits are folded into the PHY advertisement and renegotiated.
 */
1587 static int lan78xx_set_pause(struct net_device *net,
1588 struct ethtool_pauseparam *pause)
1590 struct lan78xx_net *dev = netdev_priv(net);
1591 struct phy_device *phydev = net->phydev;
1592 struct ethtool_link_ksettings ecmd;
1595 phy_ethtool_ksettings_get(phydev, &ecmd);
1597 if (pause->autoneg && !ecmd.base.autoneg) {
1602 dev->fc_request_control = 0;
1603 if (pause->rx_pause)
1604 dev->fc_request_control |= FLOW_CTRL_RX;
1606 if (pause->tx_pause)
1607 dev->fc_request_control |= FLOW_CTRL_TX;
1609 if (ecmd.base.autoneg) {
/* rebuild the advertisement: strip old pause bits, add requested ones */
1613 ethtool_convert_link_mode_to_legacy_u32(
1614 &advertising, ecmd.link_modes.advertising);
1616 advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1617 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1618 advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1620 ethtool_convert_legacy_u32_to_link_mode(
1621 ecmd.link_modes.advertising, advertising);
1623 phy_ethtool_ksettings_set(phydev, &ecmd);
1626 dev->fc_autoneg = pause->autoneg;
/* ethtool .get_regs_len: MAC register dump size, plus the PHY register
 * block when a PHY is attached.
 */
1633 static int lan78xx_get_regs_len(struct net_device *netdev)
1635 if (!netdev->phydev)
1636 return (sizeof(lan78xx_regs));
1638 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
/* ethtool .get_regs: dump the MAC registers listed in lan78xx_regs[],
 * followed by PHY registers 0..31 when a PHY is attached.
 */
1642 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1647 struct lan78xx_net *dev = netdev_priv(netdev);
1649 /* Read Device/MAC registers */
1650 for (i = 0; i < (sizeof(lan78xx_regs) / sizeof(u32)); i++)
1651 lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1653 if (!netdev->phydev)
1656 /* Read PHY registers */
1657 for (j = 0; j < 32; i++, j++)
1658 data[i] = phy_read(netdev->phydev, j);
/* ethtool operations table wiring the handlers above into the core. */
1661 static const struct ethtool_ops lan78xx_ethtool_ops = {
1662 .get_link = lan78xx_get_link,
1663 .nway_reset = phy_ethtool_nway_reset,
1664 .get_drvinfo = lan78xx_get_drvinfo,
1665 .get_msglevel = lan78xx_get_msglevel,
1666 .set_msglevel = lan78xx_set_msglevel,
1667 .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1668 .get_eeprom = lan78xx_ethtool_get_eeprom,
1669 .set_eeprom = lan78xx_ethtool_set_eeprom,
1670 .get_ethtool_stats = lan78xx_get_stats,
1671 .get_sset_count = lan78xx_get_sset_count,
1672 .get_strings = lan78xx_get_strings,
1673 .get_wol = lan78xx_get_wol,
1674 .set_wol = lan78xx_set_wol,
1675 .get_eee = lan78xx_get_eee,
1676 .set_eee = lan78xx_set_eee,
1677 .get_pauseparam = lan78xx_get_pause,
1678 .set_pauseparam = lan78xx_set_pause,
1679 .get_link_ksettings = lan78xx_get_link_ksettings,
1680 .set_link_ksettings = lan78xx_set_link_ksettings,
1681 .get_regs_len = lan78xx_get_regs_len,
1682 .get_regs = lan78xx_get_regs,
/* ndo_do_ioctl: forward MII ioctls to the PHY; only valid while running. */
1685 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1687 if (!netif_running(netdev))
1690 return phy_mii_ioctl(netdev->phydev, rq, cmd);
/* Establish the device MAC address.
 * Reads the current address from RX_ADDRL/RX_ADDRH; if invalid, tries in
 * order: platform/Device Tree, EEPROM, OTP, and finally a random address.
 * The result is written back to the MAC and installed in perfect-filter
 * slot 0 (MAF_LO/HI(0)), then copied into the netdev.
 */
1693 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1695 u32 addr_lo, addr_hi;
1699 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1700 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
/* registers hold the address little-endian: lo = bytes 0..3, hi = 4..5 */
1702 addr[0] = addr_lo & 0xFF;
1703 addr[1] = (addr_lo >> 8) & 0xFF;
1704 addr[2] = (addr_lo >> 16) & 0xFF;
1705 addr[3] = (addr_lo >> 24) & 0xFF;
1706 addr[4] = addr_hi & 0xFF;
1707 addr[5] = (addr_hi >> 8) & 0xFF;
1709 if (!is_valid_ether_addr(addr)) {
1710 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1711 /* valid address present in Device Tree */
1712 netif_dbg(dev, ifup, dev->net,
1713 "MAC address read from Device Tree");
1714 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1715 ETH_ALEN, addr) == 0) ||
1716 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1717 ETH_ALEN, addr) == 0)) &&
1718 is_valid_ether_addr(addr)) {
1719 /* eeprom values are valid so use them */
1720 netif_dbg(dev, ifup, dev->net,
1721 "MAC address read from EEPROM");
1723 /* generate random MAC */
1724 random_ether_addr(addr);
1725 netif_dbg(dev, ifup, dev->net,
1726 "MAC address set to random addr");
1729 addr_lo = addr[0] | (addr[1] << 8) |
1730 (addr[2] << 16) | (addr[3] << 24);
1731 addr_hi = addr[4] | (addr[5] << 8);
1733 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1734 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
/* install the address into MAC address filter slot 0 and mark it valid */
1737 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1738 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1740 ether_addr_copy(dev->net->dev_addr, addr);
1743 /* MDIO read and write wrappers for phylib */
/* mii_bus .read: perform an MDIO read through the MAC's MII_ACC/MII_DATA
 * registers, serialized by phy_mutex and guarded by USB autopm.
 */
1744 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1746 struct lan78xx_net *dev = bus->priv;
1750 ret = usb_autopm_get_interface(dev->intf);
1754 mutex_lock(&dev->phy_mutex);
1756 /* confirm MII not busy */
1757 ret = lan78xx_phy_wait_not_busy(dev);
1761 /* set the address, index & direction (read from PHY) */
1762 addr = mii_access(phy_id, idx, MII_READ);
1763 ret = lan78xx_write_reg(dev, MII_ACC, addr);
/* wait for the transaction to complete before sampling MII_DATA */
1765 ret = lan78xx_phy_wait_not_busy(dev);
1769 ret = lan78xx_read_reg(dev, MII_DATA, &val);
1771 ret = (int)(val & 0xFFFF);
1774 mutex_unlock(&dev->phy_mutex);
1775 usb_autopm_put_interface(dev->intf);
/* mii_bus .write: perform an MDIO write via MII_DATA + MII_ACC, serialized
 * by phy_mutex and guarded by USB autopm.
 */
1780 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1783 struct lan78xx_net *dev = bus->priv;
1787 ret = usb_autopm_get_interface(dev->intf);
1791 mutex_lock(&dev->phy_mutex);
1793 /* confirm MII not busy */
1794 ret = lan78xx_phy_wait_not_busy(dev);
/* stage the data word first; the MII_ACC write starts the transaction */
1799 ret = lan78xx_write_reg(dev, MII_DATA, val);
1801 /* set the address, index & direction (write to PHY) */
1802 addr = mii_access(phy_id, idx, MII_WRITE);
1803 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1805 ret = lan78xx_phy_wait_not_busy(dev);
1810 mutex_unlock(&dev->phy_mutex);
1811 usb_autopm_put_interface(dev->intf);
/* Allocate and register the driver's MDIO bus.
 * LAN7800/7850 expose only the internal PHY at address 1 (phy_mask);
 * LAN7801 scans external PHY addresses.  The bus is registered against an
 * optional "mdio" child node of the USB device's DT node.
 */
1815 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1817 struct device_node *node;
1820 dev->mdiobus = mdiobus_alloc();
1821 if (!dev->mdiobus) {
1822 netdev_err(dev->net, "can't allocate MDIO bus\n");
1826 dev->mdiobus->priv = (void *)dev;
1827 dev->mdiobus->read = lan78xx_mdiobus_read;
1828 dev->mdiobus->write = lan78xx_mdiobus_write;
1829 dev->mdiobus->name = "lan78xx-mdiobus";
/* unique bus id derived from the USB bus/device numbers */
1831 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1832 dev->udev->bus->busnum, dev->udev->devnum);
1834 switch (dev->chipid) {
1835 case ID_REV_CHIP_ID_7800_:
1836 case ID_REV_CHIP_ID_7850_:
1837 /* set to internal PHY id */
1838 dev->mdiobus->phy_mask = ~(1 << 1);
1840 case ID_REV_CHIP_ID_7801_:
1841 /* scan thru PHYAD[2..0] */
1842 dev->mdiobus->phy_mask = ~(0xFF);
1846 node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
1847 ret = of_mdiobus_register(dev->mdiobus, node);
1851 netdev_err(dev->net, "can't register MDIO bus\n");
1855 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
/* error path: free the allocated bus on registration failure */
1858 mdiobus_free(dev->mdiobus);
/* Tear down the MDIO bus registered by lan78xx_mdio_init(). */
1862 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1864 mdiobus_unregister(dev->mdiobus);
1865 mdiobus_free(dev->mdiobus);
/* phylib link-change callback.
 * Applies a chip workaround for forced 100M modes: drop to 10M first, then
 * back to 100M, with the PHY interrupt masked during the toggle.
 */
1868 static void lan78xx_link_status_change(struct net_device *net)
1870 struct phy_device *phydev = net->phydev;
1873 /* At forced 100 F/H mode, chip may fail to set mode correctly
1874 * when cable is switched between long(~50+m) and short one.
1875 * As workaround, set to 10 before setting to 100
1876 * at forced 100 F/H mode.
1878 if (!phydev->autoneg && (phydev->speed == 100)) {
1879 /* disable phy interrupt */
1880 temp = phy_read(phydev, LAN88XX_INT_MASK);
1881 temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1882 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1884 temp = phy_read(phydev, MII_BMCR);
1885 temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1886 phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1887 temp |= BMCR_SPEED100;
1888 phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1890 /* clear pending interrupt generated while workaround */
1891 temp = phy_read(phydev, LAN88XX_INT_STS);
1893 /* enable phy interrupt back */
1894 temp = phy_read(phydev, LAN88XX_INT_MASK);
1895 temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1896 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
/* irq_domain .map: bind a virq to this chip's irqchip/handler and attach
 * the per-device domain data as chip data.
 */
1900 static int irq_map(struct irq_domain *d, unsigned int irq,
1901 irq_hw_number_t hwirq)
1903 struct irq_domain_data *data = d->host_data;
1905 irq_set_chip_data(irq, data);
1906 irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1907 irq_set_noprobe(irq);
/* irq_domain .unmap: detach the irqchip/handler and clear chip data. */
1912 static void irq_unmap(struct irq_domain *d, unsigned int irq)
1914 irq_set_chip_and_handler(irq, NULL, NULL);
1915 irq_set_chip_data(irq, NULL);
/* irq_domain ops for the device's interrupt-endpoint sourced interrupts. */
1918 static const struct irq_domain_ops chip_domain_ops = {
/* irqchip .irq_mask: clear the hwirq bit in the cached enable mask; the
 * hardware register is written later in irq_bus_sync_unlock.
 */
1923 static void lan78xx_irq_mask(struct irq_data *irqd)
1925 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1927 data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
/* irqchip .irq_unmask: set the hwirq bit in the cached enable mask. */
1930 static void lan78xx_irq_unmask(struct irq_data *irqd)
1932 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1934 data->irqenable |= BIT(irqd_to_hwirq(irqd));
/* irqchip .irq_bus_lock: serialize slow-bus (USB) register updates. */
1937 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1939 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1941 mutex_lock(&data->irq_lock);
/* irqchip .irq_bus_sync_unlock: flush the cached enable mask to INT_EP_CTL
 * (only if it changed) and release the bus lock.  USB register access must
 * happen here because only the bus_lock callbacks run in sleepable context.
 */
1944 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1946 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1947 struct lan78xx_net *dev =
1948 container_of(data, struct lan78xx_net, domain_data);
1952 /* call register access here because irq_bus_lock & irq_bus_sync_unlock
1953 * are only two callbacks executed in non-atomic contex.
1955 ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1956 if (buf != data->irqenable)
1957 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1959 mutex_unlock(&data->irq_lock);
/* irqchip implementation backing the device's interrupt endpoint bits. */
1962 static struct irq_chip lan78xx_irqchip = {
1963 .name = "lan78xx-irqs",
1964 .irq_mask = lan78xx_irq_mask,
1965 .irq_unmask = lan78xx_irq_unmask,
1966 .irq_bus_lock = lan78xx_irq_bus_lock,
1967 .irq_bus_sync_unlock = lan78xx_irq_bus_sync_unlock,
/* Create a simple irq domain over the device's MAX_INT_EP interrupt bits
 * and map the PHY interrupt (INT_EP_PHY), storing the resulting virq in
 * domain_data.phyirq.  The current INT_EP_CTL value seeds the enable cache.
 */
1970 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1972 struct device_node *of_node;
1973 struct irq_domain *irqdomain;
1974 unsigned int irqmap = 0;
1978 of_node = dev->udev->dev.parent->of_node;
1980 mutex_init(&dev->domain_data.irq_lock);
1982 lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1983 dev->domain_data.irqenable = buf;
1985 dev->domain_data.irqchip = &lan78xx_irqchip;
1986 dev->domain_data.irq_handler = handle_simple_irq;
1988 irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1989 &chip_domain_ops, &dev->domain_data);
1991 /* create mapping for PHY interrupt */
1992 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
/* error path: dismantle the domain if the PHY mapping failed */
1994 irq_domain_remove(irqdomain);
2003 dev->domain_data.irqdomain = irqdomain;
2004 dev->domain_data.phyirq = irqmap;
2009 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2011 if (dev->domain_data.phyirq > 0) {
2012 irq_dispose_mapping(dev->domain_data.phyirq);
2014 if (dev->domain_data.irqdomain)
2015 irq_domain_remove(dev->domain_data.irqdomain);
2017 dev->domain_data.phyirq = 0;
2018 dev->domain_data.irqdomain = NULL;
/* PHY fixup for the external LAN8835 (LAN7801 boards): route the shared
 * pin to IRQ_N mode and enable the MAC-side RGMII TXC delay and DLL tune.
 */
2021 static int lan8835_fixup(struct phy_device *phydev)
2025 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2027 /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2028 buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2031 phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2033 /* RGMII MAC TXC Delay Enable */
2034 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2035 MAC_RGMII_ID_TXC_DELAY_EN_);
2037 /* RGMII TX DLL Tune Adjust */
2038 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2040 dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
/* PHY fixup for the external KSZ9031RNX: program RGMII pad-skew registers
 * via MMD and select RGMII with RX internal delay on the MAC side.
 */
2045 static int ksz9031rnx_fixup(struct phy_device *phydev)
2047 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2049 /* Micrel9301RNX PHY configuration */
2050 /* RGMII Control Signal Pad Skew */
2051 phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2052 /* RGMII RX Data Pad Skew */
2053 phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2054 /* RGMII RX Clock Pad Skew */
2055 phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2057 dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
/* LAN7801 PHY discovery.
 * If no PHY answers on the MDIO bus, register a 1G full-duplex fixed PHY
 * and configure the MAC for RGMII (TXC delay, DLL tune, clock enables).
 * Otherwise register the RGMII fixups for the supported external PHYs.
 * Returns the phy_device to attach, marked as external.
 */
2062 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2066 struct fixed_phy_status fphy_status = {
2068 .speed = SPEED_1000,
2069 .duplex = DUPLEX_FULL,
2071 struct phy_device *phydev;
2073 phydev = phy_find_first(dev->mdiobus);
2075 netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2076 phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1,
2078 if (IS_ERR(phydev)) {
2079 netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2082 netdev_dbg(dev->net, "Registered FIXED PHY\n");
2083 dev->interface = PHY_INTERFACE_MODE_RGMII;
2084 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2085 MAC_RGMII_ID_TXC_DELAY_EN_);
2086 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2087 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2088 buf |= HW_CFG_CLK125_EN_;
2089 buf |= HW_CFG_REFCLK25_EN_;
2090 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2093 netdev_err(dev->net, "no PHY driver found\n");
2096 dev->interface = PHY_INTERFACE_MODE_RGMII;
2097 /* external PHY fixup for KSZ9031RNX */
2098 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2101 netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2104 /* external PHY fixup for LAN8835 */
2105 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2108 netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2111 /* add more external PHY fixup here if needed */
2113 phydev->is_internal = false;
/* Find and attach the PHY for this chip, then configure it.
 * LAN7801 uses lan7801_phy_init() (external/fixed PHY, RGMII);
 * LAN7800/7850 use the internal GMII PHY.  Sets up the PHY interrupt (or
 * polling), AUTOMDIX, flow-control advertisement, optional DT LED modes,
 * and kicks off autonegotiation.
 */
2118 static int lan78xx_phy_init(struct lan78xx_net *dev)
2122 struct phy_device *phydev;
2124 switch (dev->chipid) {
2125 case ID_REV_CHIP_ID_7801_:
2126 phydev = lan7801_phy_init(dev);
2128 netdev_err(dev->net, "lan7801: PHY Init Failed");
2133 case ID_REV_CHIP_ID_7800_:
2134 case ID_REV_CHIP_ID_7850_:
2135 phydev = phy_find_first(dev->mdiobus);
2137 netdev_err(dev->net, "no PHY found\n");
2140 phydev->is_internal = true;
2141 dev->interface = PHY_INTERFACE_MODE_GMII;
2145 netdev_err(dev->net, "Unknown CHIP ID found\n");
2149 /* if phyirq is not set, use polling mode in phylib */
2150 if (dev->domain_data.phyirq > 0)
2151 phydev->irq = dev->domain_data.phyirq;
2154 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2156 /* set to AUTOMDIX */
2157 phydev->mdix = ETH_TP_MDI_AUTO;
2159 ret = phy_connect_direct(dev->net, phydev,
2160 lan78xx_link_status_change,
2163 netdev_err(dev->net, "can't attach PHY to %s\n",
/* on attach failure for LAN7801, undo fixed PHY / fixup registration */
2165 if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2166 if (phy_is_pseudo_fixed_link(phydev)) {
2167 fixed_phy_unregister(phydev);
2169 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2171 phy_unregister_fixup_for_uid(PHY_LAN8835,
2178 /* MAC doesn't support 1000T Half */
2179 phydev->supported &= ~SUPPORTED_1000baseT_Half;
2181 /* support both flow controls */
2182 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2183 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2184 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2185 phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2187 if (phydev->mdio.dev.of_node) {
2191 len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2192 "microchip,led-modes",
2195 /* Ensure the appropriate LEDs are enabled */
2196 lan78xx_read_reg(dev, HW_CFG, &reg);
2197 reg &= ~(HW_CFG_LED0_EN_ |
/* (len > N) * FLAG enables LED N iff the DT lists that many modes */
2201 reg |= (len > 0) * HW_CFG_LED0_EN_ |
2202 (len > 1) * HW_CFG_LED1_EN_ |
2203 (len > 2) * HW_CFG_LED2_EN_ |
2204 (len > 3) * HW_CFG_LED3_EN_;
2205 lan78xx_write_reg(dev, HW_CFG, reg);
2209 genphy_config_aneg(phydev);
2211 dev->fc_autoneg = phydev->autoneg;
/* Program the maximum RX frame length (size + 4 bytes FCS) in MAC_RX.
 * The receiver is disabled around the change and re-enabled afterwards
 * only if it was enabled on entry.
 */
2216 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2222 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2224 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2227 buf &= ~MAC_RX_RXEN_;
2228 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2231 /* add 4 to size for FCS */
2232 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2233 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2235 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2238 buf |= MAC_RX_RXEN_;
2239 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* Asynchronously unlink every URB queued on @q, returning the count.
 * The queue lock is dropped around usb_unlink_urb() (which can complete
 * synchronously in some PM resume paths); a URB reference is taken first
 * to avoid racing with the completion handler.
 */
2245 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2247 struct sk_buff *skb;
2248 unsigned long flags;
2251 spin_lock_irqsave(&q->lock, flags);
2252 while (!skb_queue_empty(q)) {
2253 struct skb_data *entry;
/* find the next entry not already in unlink_start state */
2257 skb_queue_walk(q, skb) {
2258 entry = (struct skb_data *)skb->cb;
2259 if (entry->state != unlink_start)
2264 entry->state = unlink_start;
2267 /* Get reference count of the URB to avoid it to be
2268 * freed during usb_unlink_urb, which may trigger
2269 * use-after-free problem inside usb_unlink_urb since
2270 * usb_unlink_urb is always racing with .complete
2271 * handler(include defer_bh).
2274 spin_unlock_irqrestore(&q->lock, flags);
2275 /* during some PM-driven resume scenarios,
2276 * these (async) unlinks complete immediately
2278 ret = usb_unlink_urb(urb);
2279 if (ret != -EINPROGRESS && ret != 0)
2280 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2284 spin_lock_irqsave(&q->lock, flags);
2286 spin_unlock_irqrestore(&q->lock, flags);
/* ndo_change_mtu: reject MTUs whose link-layer size is an exact multiple
 * of the USB max packet size (would require an extra zero-length packet),
 * program the new RX max frame length, and grow rx_urb_size if needed —
 * unlinking in-flight RX URBs so they are resubmitted at the new size.
 */
2290 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2292 struct lan78xx_net *dev = netdev_priv(netdev);
2293 int ll_mtu = new_mtu + netdev->hard_header_len;
2294 int old_hard_mtu = dev->hard_mtu;
2295 int old_rx_urb_size = dev->rx_urb_size;
2298 /* no second zero-length packet read wanted after mtu-sized packets */
2299 if ((ll_mtu % dev->maxpacket) == 0)
2302 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2304 netdev->mtu = new_mtu;
2306 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2307 if (dev->rx_urb_size == old_hard_mtu) {
2308 dev->rx_urb_size = dev->hard_mtu;
2309 if (dev->rx_urb_size > old_rx_urb_size) {
2310 if (netif_running(dev->net)) {
2311 unlink_urbs(dev, &dev->rxq);
2312 tasklet_schedule(&dev->bh);
/* ndo_set_mac_address: validate and install a new MAC address, writing it
 * to RX_ADDRL/RX_ADDRH in the same little-endian packing used by
 * lan78xx_init_mac_address().  Not permitted while the interface runs.
 */
2320 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2322 struct lan78xx_net *dev = netdev_priv(netdev);
2323 struct sockaddr *addr = p;
2324 u32 addr_lo, addr_hi;
2327 if (netif_running(netdev))
2330 if (!is_valid_ether_addr(addr->sa_data))
2331 return -EADDRNOTAVAIL;
2333 ether_addr_copy(netdev->dev_addr, addr->sa_data);
2335 addr_lo = netdev->dev_addr[0] |
2336 netdev->dev_addr[1] << 8 |
2337 netdev->dev_addr[2] << 16 |
2338 netdev->dev_addr[3] << 24;
2339 addr_hi = netdev->dev_addr[4] |
2340 netdev->dev_addr[5] << 8;
2342 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2343 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2348 /* Enable or disable Rx checksum offload engine */
/* ndo_set_features: translate RXCSUM / VLAN strip / VLAN filter feature
 * bits into RFE_CTL bits.  The cached rfe_ctl is updated under its
 * spinlock; the register write happens after the lock is dropped.
 */
2349 static int lan78xx_set_features(struct net_device *netdev,
2350 netdev_features_t features)
2352 struct lan78xx_net *dev = netdev_priv(netdev);
2353 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2354 unsigned long flags;
2357 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2359 if (features & NETIF_F_RXCSUM) {
2360 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2361 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2363 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2364 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2367 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2368 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2370 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2372 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2373 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2375 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2377 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2379 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
/* Workqueue handler: push the software VLAN filter table to the device
 * dataport.  Deferred because the register access must sleep.
 */
2384 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2386 struct lan78xx_priv *pdata =
2387 container_of(param, struct lan78xx_priv, set_vlan);
2388 struct lan78xx_net *dev = pdata->dev;
2390 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2391 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
/* ndo_vlan_rx_add_vid: set the VID's bit in the 4096-bit filter bitmap
 * (128 u32 words, 32 VIDs per word) and defer the hardware update.
 */
2394 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2395 __be16 proto, u16 vid)
2397 struct lan78xx_net *dev = netdev_priv(netdev);
2398 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2400 u16 vid_dword_index;
2402 vid_dword_index = (vid >> 5) & 0x7F;
2403 vid_bit_index = vid & 0x1F;
2405 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2407 /* defer register writes to a sleepable context */
2408 schedule_work(&pdata->set_vlan);
/* ndo_vlan_rx_kill_vid: clear the VID's bit in the filter bitmap and
 * defer the hardware update (mirror of lan78xx_vlan_rx_add_vid).
 */
2413 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2414 __be16 proto, u16 vid)
2416 struct lan78xx_net *dev = netdev_priv(netdev);
2417 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2419 u16 vid_dword_index;
2421 vid_dword_index = (vid >> 5) & 0x7F;
2422 vid_bit_index = vid & 0x1F;
2424 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2426 /* defer register writes to a sleepable context */
2427 schedule_work(&pdata->set_vlan);
/* Program the USB LTM (Latency Tolerance Messaging) registers.
 * When LTM is enabled in USB_CFG1, the six register values are loaded from
 * a 24-byte EEPROM/OTP table at offset 0x3F if present; otherwise the
 * zero-initialized defaults in regs[] are written.
 */
2432 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2436 u32 regs[6] = { 0 };
2438 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2439 if (buf & USB_CFG1_LTM_ENABLE_) {
2441 /* Get values from EEPROM first */
2442 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2443 if (temp[0] == 24) {
2444 ret = lan78xx_read_raw_eeprom(dev,
2451 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2452 if (temp[0] == 24) {
2453 ret = lan78xx_read_raw_otp(dev,
2463 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2464 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2465 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2466 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2467 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2468 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
/* Full device initialization after (re)set.
 * Sequence: lite reset (HW_CFG_LRST_) with 1s timeout, MAC address setup,
 * chip ID latch, USB/burst/bulk-in tuning per bus speed, FIFO sizing,
 * interrupt/flow-control clearing, RFE defaults, feature programming,
 * PHY reset with 1s timeout, MAC mode selection, and finally enabling the
 * TX/RX MAC and FIFO controllers.
 */
2471 static int lan78xx_reset(struct lan78xx_net *dev)
2473 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2476 unsigned long timeout;
2479 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2480 buf |= HW_CFG_LRST_;
2481 ret = lan78xx_write_reg(dev, HW_CFG, buf);
/* poll until the self-clearing lite-reset bit drops, max ~1 second */
2483 timeout = jiffies + HZ;
2486 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2487 if (time_after(jiffies, timeout)) {
2488 netdev_warn(dev->net,
2489 "timeout on completion of LiteReset");
2492 } while (buf & HW_CFG_LRST_);
2494 lan78xx_init_mac_address(dev);
2496 /* save DEVID for later usage */
2497 ret = lan78xx_read_reg(dev, ID_REV, &buf);
2498 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2499 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2501 /* Respond to the IN token with a NAK */
2502 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2503 buf |= USB_CFG_BIR_;
2504 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
/* program Latency Tolerance Messaging before picking queue sizes */
2507 lan78xx_init_ltm(dev);
2509 if (dev->udev->speed == USB_SPEED_SUPER) {
2510 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2511 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2514 } else if (dev->udev->speed == USB_SPEED_HIGH) {
2515 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2516 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2517 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2518 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2520 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2521 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2526 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2527 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2529 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2531 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2533 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2534 buf |= USB_CFG_BCE_;
2535 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2537 /* set FIFO sizes */
2538 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2539 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2541 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2542 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2544 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2545 ret = lan78xx_write_reg(dev, FLOW, 0);
2546 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2548 /* Don't need rfe_ctl_lock during initialisation */
2549 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2550 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2551 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2553 /* Enable or disable checksum offload engines */
2554 lan78xx_set_features(dev->net, dev->net->features);
2556 lan78xx_set_multicast(dev->net);
/* reset the PHY and poll for reset completion + device ready, max ~1s */
2559 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2560 buf |= PMT_CTL_PHY_RST_;
2561 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2563 timeout = jiffies + HZ;
2566 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2567 if (time_after(jiffies, timeout)) {
2568 netdev_warn(dev->net, "timeout waiting for PHY Reset");
2571 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2573 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2574 /* LAN7801 only has RGMII mode */
2575 if (dev->chipid == ID_REV_CHIP_ID_7801_)
2576 buf &= ~MAC_CR_GMII_EN_;
2578 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2579 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2580 if (!ret && sig != EEPROM_INDICATOR) {
2581 /* Implies there is no external eeprom. Set mac speed */
2582 netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2583 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2586 ret = lan78xx_write_reg(dev, MAC_CR, buf);
2588 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2589 buf |= MAC_TX_TXEN_;
2590 ret = lan78xx_write_reg(dev, MAC_TX, buf);
2592 ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2593 buf |= FCT_TX_CTL_EN_;
2594 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2596 ret = lan78xx_set_rx_max_frame_length(dev,
2597 dev->net->mtu + VLAN_ETH_HLEN);
2599 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2600 buf |= MAC_RX_RXEN_;
2601 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2603 ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2604 buf |= FCT_RX_CTL_EN_;
2605 ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
/* Initialize the statistics rollover thresholds.
 * Most hardware counters are 20-bit; the byte counters and EEE LPI
 * counters below are full 32-bit and get a 0xFFFFFFFF rollover maximum.
 * Finally schedules the first periodic stats update.
 */
2610 static void lan78xx_init_stats(struct lan78xx_net *dev)
2615 /* initialize for stats update
2616 * some counters are 20bits and some are 32bits
2618 p = (u32 *)&dev->stats.rollover_max;
2619 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2622 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2623 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2624 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2625 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2626 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2627 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2628 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2629 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2630 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2631 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2633 set_bit(EVENT_STAT_UPDATE, &dev->flags);
/* ndo_open: start the PHY, submit the interrupt URB for link events,
 * initialize stats, mark the device open, and kick a deferred link reset.
 */
2636 static int lan78xx_open(struct net_device *net)
2638 struct lan78xx_net *dev = netdev_priv(net);
2641 ret = usb_autopm_get_interface(dev->intf);
2645 phy_start(net->phydev);
2647 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2649 /* for Link Check */
2650 if (dev->urb_intr) {
2651 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2653 netif_err(dev, ifup, dev->net,
2654 "intr submit %d\n", ret);
2659 lan78xx_init_stats(dev);
2661 set_bit(EVENT_DEV_OPEN, &dev->flags);
2663 netif_start_queue(net);
2665 dev->link_on = false;
2667 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2669 usb_autopm_put_interface(dev->intf);
/* Unlink all TX/RX URBs and wait for their completions to drain.
 * Uses an on-stack waitqueue published via dev->wait so completion paths
 * can wake us.
 * NOTE(review): the wait condition uses && — the loop exits once ANY of
 * rxq/txq/done is empty, not when all are.  Verify against intent; waiting
 * while any queue is non-empty would require ||.
 */
2675 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2677 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2678 DECLARE_WAITQUEUE(wait, current);
2681 /* ensure there are no more active urbs */
2682 add_wait_queue(&unlink_wakeup, &wait);
2683 set_current_state(TASK_UNINTERRUPTIBLE);
2684 dev->wait = &unlink_wakeup;
2685 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2687 /* maybe wait for deletions to finish. */
2688 while (!skb_queue_empty(&dev->rxq) &&
2689 !skb_queue_empty(&dev->txq) &&
2690 !skb_queue_empty(&dev->done)) {
2691 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2692 set_current_state(TASK_UNINTERRUPTIBLE);
2693 netif_dbg(dev, ifdown, dev->net,
2694 "waited for %d urb completions\n", temp);
2696 set_current_state(TASK_RUNNING);
2698 remove_wait_queue(&unlink_wakeup, &wait);
/* .ndo_stop: bring the interface down.
 * Stops the stats timer and PHY, quiesces the queues, terminates all
 * URBs (bulk and interrupt), purges paused RX skbs, and neutralizes
 * deferred work before dropping the PM reference taken in open().
 */
2701 static int lan78xx_stop(struct net_device *net)
2703 struct lan78xx_net *dev = netdev_priv(net);
2705 if (timer_pending(&dev->stat_monitor))
2706 del_timer_sync(&dev->stat_monitor);
2709 phy_stop(net->phydev);
2711 clear_bit(EVENT_DEV_OPEN, &dev->flags);
2712 netif_stop_queue(net);
2714 netif_info(dev, ifdown, dev->net,
2715 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2716 net->stats.rx_packets, net->stats.tx_packets,
2717 net->stats.rx_errors, net->stats.tx_errors);
2719 lan78xx_terminate_urbs(dev);
2721 usb_kill_urb(dev->urb_intr);
2723 skb_queue_purge(&dev->rxq_pause);
2725 /* deferred work (task, timer, softirq) must also stop.
2726 * can't flush_scheduled_work() until we drop rtnl (later),
2727 * else workers could deadlock; so make workers a NOP.
2730 cancel_delayed_work_sync(&dev->wq);
2731 tasklet_kill(&dev->bh);
2733 usb_autopm_put_interface(dev->intf);
/* Thin wrapper: flatten a (possibly fragmented) skb into a single
 * linear buffer so the TX command words can be prepended contiguously.
 */
2738 static int lan78xx_linearize(struct sk_buff *skb)
2740 return skb_linearize(skb);
/* Prepend the two 32-bit TX command words (TX_CMD_A/TX_CMD_B) that the
 * device expects in front of every frame: length + FCS request,
 * checksum-offload flags, LSO MSS, and optional VLAN tag insertion.
 * Returns the prepared skb, or NULL on failure (skb freed).
 * NOTE(review): tx_cmd_b is OR-ed at line 2772 — the non-GSO branch that
 * zero-initializes it appears to be on lines elided from this excerpt;
 * confirm against the full source.
 */
2743 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2744 struct sk_buff *skb, gfp_t flags)
2746 u32 tx_cmd_a, tx_cmd_b;
/* Make headroom for the 8-byte TX command header. */
2748 if (skb_cow_head(skb, TX_OVERHEAD)) {
2749 dev_kfree_skb_any(skb);
2753 if (lan78xx_linearize(skb) < 0)
2756 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2758 if (skb->ip_summed == CHECKSUM_PARTIAL)
2759 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2762 if (skb_is_gso(skb)) {
2763 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2765 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2767 tx_cmd_a |= TX_CMD_A_LSO_;
2770 if (skb_vlan_tag_present(skb)) {
2771 tx_cmd_a |= TX_CMD_A_IVTG_;
2772 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
/* Command words go on the wire little-endian, B then A at the head. */
2776 cpu_to_le32s(&tx_cmd_b);
2777 memcpy(skb->data, &tx_cmd_b, 4);
2780 cpu_to_le32s(&tx_cmd_a);
2781 memcpy(skb->data, &tx_cmd_a, 4);
/* Move an skb from an active queue (rxq/txq) to dev->done and schedule
 * the bottom-half tasklet the first time the done list becomes
 * non-empty.  Locking: irqsave on the source list lock, plain lock/
 * unlock in between, irqrestore on the done-list unlock — IRQs stay
 * disabled across the whole transfer.  Returns the skb's previous state.
 */
2786 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2787 struct sk_buff_head *list, enum skb_state state)
2789 unsigned long flags;
2790 enum skb_state old_state;
2791 struct skb_data *entry = (struct skb_data *)skb->cb;
2793 spin_lock_irqsave(&list->lock, flags);
2794 old_state = entry->state;
2795 entry->state = state;
2797 __skb_unlink(skb, list);
2798 spin_unlock(&list->lock);
2799 spin_lock(&dev->done.lock);
2801 __skb_queue_tail(&dev->done, skb);
/* Only the 0 -> 1 transition needs to kick the tasklet. */
2802 if (skb_queue_len(&dev->done) == 1)
2803 tasklet_schedule(&dev->bh);
2804 spin_unlock_irqrestore(&dev->done.lock, flags);
/* Bulk-OUT URB completion (interrupt context).
 * On success, credits the aggregated packet/byte counts stored in the
 * skb_data entry; on error, bumps tx_errors and handles the status
 * (stall -> defer EVENT_TX_HALT, shutdown/throttle cases elided here).
 * Drops the async PM reference taken at submit time and defers the skb
 * to the done list for the tasklet to clean up.
 */
2809 static void tx_complete(struct urb *urb)
2811 struct sk_buff *skb = (struct sk_buff *)urb->context;
2812 struct skb_data *entry = (struct skb_data *)skb->cb;
2813 struct lan78xx_net *dev = entry->dev;
2815 if (urb->status == 0) {
2816 dev->net->stats.tx_packets += entry->num_of_packet;
2817 dev->net->stats.tx_bytes += entry->length;
2819 dev->net->stats.tx_errors++;
2821 switch (urb->status) {
2823 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2826 /* software-driven interface shutdown */
2834 netif_stop_queue(dev->net);
2837 netif_dbg(dev, tx_err, dev->net,
2838 "tx err %d\n", entry->urb->status);
/* Release the PM ref taken in lan78xx_tx_bh before submission. */
2843 usb_autopm_put_interface_async(dev->intf);
2845 defer_bh(dev, skb, &dev->txq, tx_done);
/* Append an skb to the given queue and record its lifecycle state in
 * the skb->cb scratch area.  Caller must hold the queue lock (called
 * under dev->txq/rxq lock in this driver).
 */
2848 static void lan78xx_queue_skb(struct sk_buff_head *list,
2849 struct sk_buff *newsk, enum skb_state state)
2851 struct skb_data *entry = (struct skb_data *)newsk->cb;
2853 __skb_queue_tail(list, newsk);
2854 entry->state = state;
/* .ndo_start_xmit: prepend the TX command header via lan78xx_tx_prep
 * and park the frame on txq_pend; actual USB submission (with frame
 * batching) happens in lan78xx_tx_bh from the tasklet.  Below
 * SuperSpeed, the queue is throttled once more than 10 frames pend.
 * Always returns NETDEV_TX_OK (failed prep counts as error + drop).
 */
2858 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2860 struct lan78xx_net *dev = netdev_priv(net);
2861 struct sk_buff *skb2 = NULL;
2864 skb_tx_timestamp(skb);
2865 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2869 skb_queue_tail(&dev->txq_pend, skb2);
2871 /* throttle TX patch at slower than SUPER SPEED USB */
2872 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2873 (skb_queue_len(&dev->txq_pend) > 10))
2874 netif_stop_queue(net);
2876 netif_dbg(dev, tx_err, dev->net,
2877 "lan78xx_tx_prep return NULL\n");
2878 dev->net->stats.tx_errors++;
2879 dev->net->stats.tx_dropped++;
2882 tasklet_schedule(&dev->bh);
2884 return NETDEV_TX_OK;
/* Scan the interface's altsettings for the three endpoints the device
 * uses: bulk-IN (rx), bulk-OUT (tx) and interrupt-IN (status).  On
 * success, caches the bulk pipe handles and the status endpoint in dev.
 * Fails if no altsetting provides both bulk endpoints.
 */
2888 lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2891 struct usb_host_interface *alt = NULL;
2892 struct usb_host_endpoint *in = NULL, *out = NULL;
2893 struct usb_host_endpoint *status = NULL;
2895 for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2901 alt = intf->altsetting + tmp;
2903 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2904 struct usb_host_endpoint *e;
2907 e = alt->endpoint + ep;
2908 switch (e->desc.bmAttributes) {
2909 case USB_ENDPOINT_XFER_INT:
/* Only an IN interrupt endpoint can carry status events. */
2910 if (!usb_endpoint_dir_in(&e->desc))
2914 case USB_ENDPOINT_XFER_BULK:
2919 if (usb_endpoint_dir_in(&e->desc)) {
2922 else if (intr && !status)
2932 if (!alt || !in || !out)
2935 dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2936 in->desc.bEndpointAddress &
2937 USB_ENDPOINT_NUMBER_MASK);
2938 dev->pipe_out = usb_sndbulkpipe(dev->udev,
2939 out->desc.bEndpointAddress &
2940 USB_ENDPOINT_NUMBER_MASK);
2941 dev->ep_intr = status;
/* One-time driver/device binding: discover endpoints, allocate and wire
 * up the private data (locks, deferred multicast/VLAN work, VLAN
 * table), advertise offload features, set up the PHY interrupt domain,
 * reset and init the chip registers, and create the MDIO bus.
 * Error unwinding (labels elided here) tears down in reverse order.
 */
2946 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2948 struct lan78xx_priv *pdata = NULL;
2952 ret = lan78xx_get_endpoints(dev, intf);
/* Private data is stashed as an unsigned long in dev->data[0]. */
2954 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2956 pdata = (struct lan78xx_priv *)(dev->data[0]);
2958 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2964 spin_lock_init(&pdata->rfe_ctl_lock);
2965 mutex_init(&pdata->dataport_mutex);
2967 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2969 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2970 pdata->vlan_table[i] = 0;
2972 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2974 dev->net->features = 0;
2976 if (DEFAULT_TX_CSUM_ENABLE)
2977 dev->net->features |= NETIF_F_HW_CSUM;
2979 if (DEFAULT_RX_CSUM_ENABLE)
2980 dev->net->features |= NETIF_F_RXCSUM;
2982 if (DEFAULT_TSO_CSUM_ENABLE)
2983 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2985 if (DEFAULT_VLAN_RX_OFFLOAD)
2986 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
2988 if (DEFAULT_VLAN_FILTER_ENABLE)
2989 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2991 dev->net->hw_features = dev->net->features;
2993 ret = lan78xx_setup_irq_domain(dev);
2995 netdev_warn(dev->net,
2996 "lan78xx_setup_irq_domain() failed : %d", ret);
/* Account for the 8-byte TX command header in header_len / hard_mtu. */
3000 dev->net->hard_header_len += TX_OVERHEAD;
3001 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
3003 /* Init all registers */
3004 ret = lan78xx_reset(dev);
3006 netdev_warn(dev->net, "Registers INIT FAILED....");
3010 ret = lan78xx_mdio_init(dev);
3012 netdev_warn(dev->net, "MDIO INIT FAILED.....");
3016 dev->net->flags |= IFF_MULTICAST;
3018 pdata->wol = WAKE_MAGIC;
3023 lan78xx_remove_irq_domain(dev);
3026 netdev_warn(dev->net, "Bind routine FAILED");
3027 cancel_work_sync(&pdata->set_multicast);
3028 cancel_work_sync(&pdata->set_vlan);
/* Undo lan78xx_bind: tear down the IRQ domain and MDIO bus, flush any
 * pending multicast/VLAN deferred work, then free pdata (free elided
 * in this excerpt).
 */
3033 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3035 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3037 lan78xx_remove_irq_domain(dev);
3039 lan78xx_remove_mdio(dev);
3042 cancel_work_sync(&pdata->set_multicast);
3043 cancel_work_sync(&pdata->set_vlan);
3044 netif_dbg(dev, ifdown, dev->net, "free pdata");
/* Translate the RX command words into skb checksum state.
 * Falls back to software checksumming (CHECKSUM_NONE) when RXCSUM is
 * off, the hardware flagged a checksum error (ICSM), or a VLAN tag was
 * found but VLAN stripping is disabled (HW csum is unreliable then —
 * see comment below).  Otherwise exposes the raw 16-bit HW checksum
 * from rx_cmd_b as CHECKSUM_COMPLETE.
 */
3051 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3052 struct sk_buff *skb,
3053 u32 rx_cmd_a, u32 rx_cmd_b)
3055 /* HW Checksum offload appears to be flawed if used when not stripping
3056 * VLAN headers. Drop back to S/W checksums under these conditions.
3058 if (!(dev->net->features & NETIF_F_RXCSUM) ||
3059 unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3060 ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3061 !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3062 skb->ip_summed = CHECKSUM_NONE;
3064 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3065 skb->ip_summed = CHECKSUM_COMPLETE;
/* If VLAN RX offload is enabled and the hardware flagged a tag (FVTG),
 * attach the 802.1Q tag from the low 16 bits of rx_cmd_b to the skb.
 */
3069 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3070 struct sk_buff *skb,
3071 u32 rx_cmd_a, u32 rx_cmd_b)
3073 if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3074 (rx_cmd_a & RX_CMD_A_FVTG_))
3075 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3076 (rx_cmd_b & 0xffff));
/* Hand a fully-parsed frame to the network stack.
 * While RX is paused the skb is parked on rxq_pause instead.  Updates
 * rx counters, resolves the protocol, clears the driver's cb scratch
 * area, honours deferred RX timestamping, then netif_rx()s the frame.
 */
3079 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3083 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
3084 skb_queue_tail(&dev->rxq_pause, skb);
3088 dev->net->stats.rx_packets++;
3089 dev->net->stats.rx_bytes += skb->len;
3091 skb->protocol = eth_type_trans(skb, dev->net);
3093 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3094 skb->len + sizeof(struct ethhdr), skb->protocol);
/* cb still holds skb_data from the URB path; scrub before the stack. */
3095 memset(skb->cb, 0, sizeof(struct skb_data));
3097 if (skb_defer_rx_timestamp(skb))
3100 status = netif_rx(skb);
3101 if (status != NET_RX_SUCCESS)
3102 netif_dbg(dev, rx_err, dev->net,
3103 "netif_rx status %d\n", status);
/* Parse one bulk-IN buffer that may contain several concatenated
 * frames.  Each frame is preceded by rx_cmd_a (flags + length),
 * rx_cmd_b (csum/VLAN) and rx_cmd_c, all little-endian, and frames are
 * padded to 4-byte alignment (RXW_PADDING).  The last frame reuses the
 * URB skb directly; earlier frames are cloned out.  Returns nonzero on
 * success, 0 when the buffer is too short (per the length guard).
 */
3106 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3108 if (skb->len < dev->net->hard_header_len)
3111 while (skb->len > 0) {
3112 u32 rx_cmd_a, rx_cmd_b, align_count, size;
3114 struct sk_buff *skb2;
3115 unsigned char *packet;
/* Pull the three command words off the front of the buffer. */
3117 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
3118 le32_to_cpus(&rx_cmd_a);
3119 skb_pull(skb, sizeof(rx_cmd_a));
3121 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
3122 le32_to_cpus(&rx_cmd_b);
3123 skb_pull(skb, sizeof(rx_cmd_b));
3125 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
3126 le16_to_cpus(&rx_cmd_c);
3127 skb_pull(skb, sizeof(rx_cmd_c));
3131 /* get the packet length */
3132 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3133 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3135 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3136 netif_dbg(dev, rx_err, dev->net,
3137 "Error rx_cmd_a=0x%08x", rx_cmd_a);
3139 /* last frame in this batch */
3140 if (skb->len == size) {
3141 lan78xx_rx_csum_offload(dev, skb,
3142 rx_cmd_a, rx_cmd_b);
3143 lan78xx_rx_vlan_offload(dev, skb,
3144 rx_cmd_a, rx_cmd_b);
3146 skb_trim(skb, skb->len - 4); /* remove fcs */
3147 skb->truesize = size + sizeof(struct sk_buff);
/* Not last: clone and carve this frame out of the batch buffer. */
3152 skb2 = skb_clone(skb, GFP_ATOMIC);
3153 if (unlikely(!skb2)) {
3154 netdev_warn(dev->net, "Error allocating skb");
3159 skb2->data = packet;
3160 skb_set_tail_pointer(skb2, size);
3162 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3163 lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3165 skb_trim(skb2, skb2->len - 4); /* remove fcs */
3166 skb2->truesize = size + sizeof(struct sk_buff);
3168 lan78xx_skb_return(dev, skb2);
3171 skb_pull(skb, size);
3173 /* padding bytes before the next frame starts */
3175 skb_pull(skb, align_count);
/* Dispatch a completed RX buffer: parse it with lan78xx_rx() (bad
 * buffer -> rx_errors), otherwise return it to the stack; on the drop
 * path (elided label) the skb is requeued on dev->done for cleanup.
 */
3181 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3183 if (!lan78xx_rx(dev, skb)) {
3184 dev->net->stats.rx_errors++;
3189 lan78xx_skb_return(dev, skb);
3193 netif_dbg(dev, rx_err, dev->net, "drop\n");
3194 dev->net->stats.rx_errors++;
3196 skb_queue_tail(&dev->done, skb);
3199 static void rx_complete(struct urb *urb);
/* Allocate an rx_urb_size skb, bind it to the URB and submit on the
 * bulk-IN pipe.  Submission is gated (under the rxq lock) on the netdev
 * being present, running, and neither halted nor asleep.  -ENODEV marks
 * the device gone; -EPERM/-ENOLINK-style failures defer EVENT_RX_HALT
 * or reschedule the tasklet (some branches elided).  The skb is freed
 * on any failure path.
 */
3201 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3203 struct sk_buff *skb;
3204 struct skb_data *entry;
3205 unsigned long lockflags;
3206 size_t size = dev->rx_urb_size;
3209 skb = netdev_alloc_skb_ip_align(dev->net, size);
3215 entry = (struct skb_data *)skb->cb;
3220 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3221 skb->data, size, rx_complete, skb);
3223 spin_lock_irqsave(&dev->rxq.lock, lockflags);
3225 if (netif_device_present(dev->net) &&
3226 netif_running(dev->net) &&
3227 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3228 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3229 ret = usb_submit_urb(urb, GFP_ATOMIC);
3232 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3235 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3238 netif_dbg(dev, ifdown, dev->net, "device gone\n");
3239 netif_device_detach(dev->net);
3245 netif_dbg(dev, rx_err, dev->net,
3246 "rx submit, %d\n", ret);
3247 tasklet_schedule(&dev->bh);
3250 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3253 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3255 dev_kfree_skb_any(skb);
/* Bulk-IN URB completion (interrupt context).
 * Classifies urb->status: success (with a minimum-length sanity check),
 * stall (defer EVENT_RX_HALT), async unlink/shutdown, overrun, and
 * generic errors (several case labels elided in this excerpt).  The skb
 * is deferred to the done list; the URB is resubmitted immediately
 * unless the device stopped, halted, or the skb was being unlinked.
 */
3261 static void rx_complete(struct urb *urb)
3263 struct sk_buff *skb = (struct sk_buff *)urb->context;
3264 struct skb_data *entry = (struct skb_data *)skb->cb;
3265 struct lan78xx_net *dev = entry->dev;
3266 int urb_status = urb->status;
3267 enum skb_state state;
3269 skb_put(skb, urb->actual_length);
3273 switch (urb_status) {
3275 if (skb->len < dev->net->hard_header_len) {
3277 dev->net->stats.rx_errors++;
3278 dev->net->stats.rx_length_errors++;
3279 netif_dbg(dev, rx_err, dev->net,
3280 "rx length %d\n", skb->len);
3282 usb_mark_last_busy(dev->udev);
3285 dev->net->stats.rx_errors++;
3286 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3288 case -ECONNRESET: /* async unlink */
3289 case -ESHUTDOWN: /* hardware gone */
3290 netif_dbg(dev, ifdown, dev->net,
3291 "rx shutdown, code %d\n", urb_status);
3299 dev->net->stats.rx_errors++;
3305 /* data overrun ... flush fifo? */
3307 dev->net->stats.rx_over_errors++;
3312 dev->net->stats.rx_errors++;
3313 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3317 state = defer_bh(dev, skb, &dev->rxq, state);
3320 if (netif_running(dev->net) &&
3321 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3322 state != unlink_start) {
3323 rx_submit(dev, urb, GFP_ATOMIC);
3328 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
/* TX bottom half: batch pending frames into one USB transfer.
 * Walks txq_pend accumulating frames (GSO frames are sent alone; the
 * batch is capped at MAX_SINGLE_PACKET_SIZE, each frame padded to u32
 * alignment), copies them into one freshly allocated skb, then submits
 * it as a single bulk-OUT URB under the txq lock with an async PM ref.
 * While the device is autosuspended, the URB is anchored on
 * dev->deferred and transmitted at resume instead.
 * NOTE(review): many branch/label lines are elided in this excerpt.
 */
3331 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3334 struct urb *urb = NULL;
3335 struct skb_data *entry;
3336 unsigned long flags;
3337 struct sk_buff_head *tqp = &dev->txq_pend;
3338 struct sk_buff *skb, *skb2;
3341 int skb_totallen, pkt_cnt;
3347 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
3348 if (skb_is_gso(skb)) {
3350 /* handle previous packets first */
/* length excludes the 8-byte TX command header added in tx_prep. */
3354 length = skb->len - TX_OVERHEAD;
3355 skb2 = skb_dequeue(tqp);
3359 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3361 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3365 /* copy to a single skb */
3366 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3370 skb_put(skb, skb_totallen);
3372 for (count = pos = 0; count < pkt_cnt; count++) {
3373 skb2 = skb_dequeue(tqp);
3375 length += (skb2->len - TX_OVERHEAD);
3376 memcpy(skb->data + pos, skb2->data, skb2->len);
3377 pos += roundup(skb2->len, sizeof(u32));
3378 dev_kfree_skb(skb2);
3383 urb = usb_alloc_urb(0, GFP_ATOMIC);
3387 entry = (struct skb_data *)skb->cb;
3390 entry->length = length;
3391 entry->num_of_packet = count;
3393 spin_lock_irqsave(&dev->txq.lock, flags);
3394 ret = usb_autopm_get_interface_async(dev->intf);
3396 spin_unlock_irqrestore(&dev->txq.lock, flags);
3400 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3401 skb->data, skb->len, tx_complete, skb);
/* Exact multiple of wMaxPacketSize needs an explicit ZLP terminator. */
3403 if (length % dev->maxpacket == 0) {
3404 /* send USB_ZERO_PACKET */
3405 urb->transfer_flags |= URB_ZERO_PACKET;
3409 /* if this triggers the device is still a sleep */
3410 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3411 /* transmission will be done in resume */
3412 usb_anchor_urb(urb, &dev->deferred);
3413 /* no use to process more packets */
3414 netif_stop_queue(dev->net);
3416 spin_unlock_irqrestore(&dev->txq.lock, flags);
3417 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3422 ret = usb_submit_urb(urb, GFP_ATOMIC);
3425 netif_trans_update(dev->net);
3426 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3427 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3428 netif_stop_queue(dev->net);
3431 netif_stop_queue(dev->net);
3432 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3433 usb_autopm_put_interface_async(dev->intf);
3436 usb_autopm_put_interface_async(dev->intf);
3437 netif_dbg(dev, tx_err, dev->net,
3438 "tx: submit urb err %d\n", ret);
3442 spin_unlock_irqrestore(&dev->txq.lock, flags);
3445 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3447 dev->net->stats.tx_dropped++;
3449 dev_kfree_skb_any(skb);
3452 netif_dbg(dev, tx_queued, dev->net,
3453 "> tx, len %d, type 0x%x\n", length, skb->protocol);
/* RX bottom half: top up the RX URB pool (at most 10 new submissions
 * per pass) until rx_qlen URBs are in flight; reschedules the tasklet
 * if still short, and wakes the TX queue if it has drained below
 * tx_qlen.
 */
3456 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3461 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3462 for (i = 0; i < 10; i++) {
3463 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3465 urb = usb_alloc_urb(0, GFP_ATOMIC);
3467 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3471 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3472 tasklet_schedule(&dev->bh);
3474 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3475 netif_wake_queue(dev->net);
/* Tasklet body: drain dev->done, dispatching each skb by its recorded
 * state (rx_done -> parse+return, tx_done/rx_cleanup -> free URB),
 * then, if the device is up, rearm the stats timer delta and run the
 * TX/RX bottom halves as needed.
 */
3478 static void lan78xx_bh(unsigned long param)
3480 struct lan78xx_net *dev = (struct lan78xx_net *)param;
3481 struct sk_buff *skb;
3482 struct skb_data *entry;
3484 while ((skb = skb_dequeue(&dev->done))) {
3485 entry = (struct skb_data *)(skb->cb);
3486 switch (entry->state) {
3488 entry->state = rx_cleanup;
3489 rx_process(dev, skb);
3492 usb_free_urb(entry->urb);
3496 usb_free_urb(entry->urb);
3500 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3505 if (netif_device_present(dev->net) && netif_running(dev->net)) {
3506 /* reset update timer delta */
3507 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3509 mod_timer(&dev->stat_monitor,
3510 jiffies + STAT_UPDATE_TIMER);
3513 if (!skb_queue_empty(&dev->txq_pend))
3516 if (!timer_pending(&dev->delay) &&
3517 !test_bit(EVENT_RX_HALT, &dev->flags))
/* Deferred (process-context) event worker.  Handles, in order:
 *  - EVENT_TX_HALT / EVENT_RX_HALT: unlink URBs and clear the endpoint
 *    stall via usb_clear_halt under a PM reference;
 *  - EVENT_LINK_RESET: re-read link state via lan78xx_link_reset;
 *  - EVENT_STAT_UPDATE: refresh HW counters and rearm the stat timer
 *    with exponential backoff (delta doubles, capped at 50).
 */
3522 static void lan78xx_delayedwork(struct work_struct *work)
3525 struct lan78xx_net *dev;
3527 dev = container_of(work, struct lan78xx_net, wq.work);
3529 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3530 unlink_urbs(dev, &dev->txq);
3531 status = usb_autopm_get_interface(dev->intf);
3534 status = usb_clear_halt(dev->udev, dev->pipe_out);
3535 usb_autopm_put_interface(dev->intf);
3538 status != -ESHUTDOWN) {
3539 if (netif_msg_tx_err(dev))
3541 netdev_err(dev->net,
3542 "can't clear tx halt, status %d\n",
3545 clear_bit(EVENT_TX_HALT, &dev->flags);
3546 if (status != -ESHUTDOWN)
3547 netif_wake_queue(dev->net);
3550 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3551 unlink_urbs(dev, &dev->rxq);
3552 status = usb_autopm_get_interface(dev->intf);
3555 status = usb_clear_halt(dev->udev, dev->pipe_in);
3556 usb_autopm_put_interface(dev->intf);
3559 status != -ESHUTDOWN) {
3560 if (netif_msg_rx_err(dev))
3562 netdev_err(dev->net,
3563 "can't clear rx halt, status %d\n",
3566 clear_bit(EVENT_RX_HALT, &dev->flags);
3567 tasklet_schedule(&dev->bh);
3571 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3574 clear_bit(EVENT_LINK_RESET, &dev->flags);
3575 status = usb_autopm_get_interface(dev->intf);
3578 if (lan78xx_link_reset(dev) < 0) {
3579 usb_autopm_put_interface(dev->intf);
3581 netdev_info(dev->net, "link reset failed (%d)\n",
3584 usb_autopm_put_interface(dev->intf);
3588 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3589 lan78xx_update_stats(dev);
3591 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3593 mod_timer(&dev->stat_monitor,
3594 jiffies + (STAT_UPDATE_TIMER * dev->delta));
/* Exponential backoff of the stats poll interval, capped at 50x. */
3596 dev->delta = min((dev->delta * 2), 50);
/* Interrupt-IN URB completion: on success parse the status payload via
 * lan78xx_status(); on urb-killed/shutdown just log and bail; other
 * errors are logged without throttling (this endpoint polls slowly
 * anyway).  The URB is zeroed and resubmitted while the netdev runs.
 */
3600 static void intr_complete(struct urb *urb)
3602 struct lan78xx_net *dev = urb->context;
3603 int status = urb->status;
3608 lan78xx_status(dev, urb);
3611 /* software-driven interface shutdown */
3612 case -ENOENT: /* urb killed */
3613 case -ESHUTDOWN: /* hardware gone */
3614 netif_dbg(dev, ifdown, dev->net,
3615 "intr shutdown, code %d\n", status);
3618 /* NOTE: not throttling like RX/TX, since this endpoint
3619 * already polls infrequently
3622 netdev_dbg(dev->net, "intr status %d\n", status);
3626 if (!netif_running(dev->net))
3629 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3630 status = usb_submit_urb(urb, GFP_ATOMIC);
3632 netif_err(dev, timer, dev->net,
3633 "intr resubmit --> %d\n", status);
/* USB disconnect handler: detach intfdata, drop PHY fixups, disconnect
 * (and, for pseudo fixed-link, unregister) the PHY, unregister the
 * netdev, flush deferred work and anchored URBs, unbind, and release
 * the interrupt URB.  Remaining frees are elided in this excerpt.
 */
3636 static void lan78xx_disconnect(struct usb_interface *intf)
3638 struct lan78xx_net *dev;
3639 struct usb_device *udev;
3640 struct net_device *net;
3641 struct phy_device *phydev;
3643 dev = usb_get_intfdata(intf);
3644 usb_set_intfdata(intf, NULL);
3648 udev = interface_to_usbdev(intf);
3650 phydev = net->phydev;
/* Fixups were registered at probe for these PHY IDs; drop them now. */
3652 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3653 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3655 phy_disconnect(net->phydev);
3657 if (phy_is_pseudo_fixed_link(phydev))
3658 fixed_phy_unregister(phydev);
3660 unregister_netdev(net);
3662 cancel_delayed_work_sync(&dev->wq);
3664 usb_scuttle_anchored_urbs(&dev->deferred);
3666 lan78xx_unbind(dev, intf);
3668 usb_kill_urb(dev->urb_intr);
3669 usb_free_urb(dev->urb_intr);
/* .ndo_tx_timeout: the watchdog fired — unlink all in-flight TX URBs
 * and let the tasklet restart transmission.
 */
3675 static void lan78xx_tx_timeout(struct net_device *net)
3677 struct lan78xx_net *dev = netdev_priv(net);
3679 unlink_urbs(dev, &dev->txq);
3680 tasklet_schedule(&dev->bh);
/* net_device operations vector wiring the lan78xx_* handlers above. */
3683 static const struct net_device_ops lan78xx_netdev_ops = {
3684 .ndo_open = lan78xx_open,
3685 .ndo_stop = lan78xx_stop,
3686 .ndo_start_xmit = lan78xx_start_xmit,
3687 .ndo_tx_timeout = lan78xx_tx_timeout,
3688 .ndo_change_mtu = lan78xx_change_mtu,
3689 .ndo_set_mac_address = lan78xx_set_mac_addr,
3690 .ndo_validate_addr = eth_validate_addr,
3691 .ndo_do_ioctl = lan78xx_ioctl,
3692 .ndo_set_rx_mode = lan78xx_set_multicast,
3693 .ndo_set_features = lan78xx_set_features,
3694 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3695 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
/* Periodic timer callback: just defer the heavy stats read to the
 * kevent worker (timer context cannot do synchronous USB I/O).
 */
3698 static void lan78xx_stat_monitor(struct timer_list *t)
3700 struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3702 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
/* USB probe: allocate the etherdev, initialize all queues/locks/work,
 * bind to the hardware, resolve the three endpoints and the interrupt
 * URB, register the netdev, enable wakeup + a 10s autosuspend delay,
 * and finally bring up the PHY.  Error labels unwind in reverse order
 * (several lines elided in this excerpt).
 */
3705 static int lan78xx_probe(struct usb_interface *intf,
3706 const struct usb_device_id *id)
3708 struct lan78xx_net *dev;
3709 struct net_device *netdev;
3710 struct usb_device *udev;
3716 udev = interface_to_usbdev(intf);
3717 udev = usb_get_dev(udev);
3719 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3721 dev_err(&intf->dev, "Error: OOM\n");
3726 /* netdev_printk() needs this */
3727 SET_NETDEV_DEV(netdev, &intf->dev);
3729 dev = netdev_priv(netdev);
3733 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3734 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3736 skb_queue_head_init(&dev->rxq);
3737 skb_queue_head_init(&dev->txq);
3738 skb_queue_head_init(&dev->done);
3739 skb_queue_head_init(&dev->rxq_pause);
3740 skb_queue_head_init(&dev->txq_pend);
3741 mutex_init(&dev->phy_mutex);
3743 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3744 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3745 init_usb_anchor(&dev->deferred);
3747 netdev->netdev_ops = &lan78xx_netdev_ops;
3748 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3749 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3752 timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3754 mutex_init(&dev->stats.access_lock);
3756 ret = lan78xx_bind(dev, intf);
3759 strcpy(netdev->name, "eth%d");
/* Clamp MTU so frame + TX header fits in hard_mtu set by bind(). */
3761 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3762 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3764 /* MTU range: 68 - 9000 */
3765 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
/* NOTE(review): endpoints are indexed positionally from cur_altsetting
 * here, rather than using the result of lan78xx_get_endpoints() —
 * assumes descriptor order IN, OUT, INTR; verify against upstream.
 */
3767 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3768 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3769 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3771 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3772 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3774 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3775 dev->ep_intr->desc.bEndpointAddress &
3776 USB_ENDPOINT_NUMBER_MASK);
3777 period = dev->ep_intr->desc.bInterval;
3779 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3780 buf = kmalloc(maxp, GFP_KERNEL);
3782 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3783 if (!dev->urb_intr) {
3788 usb_fill_int_urb(dev->urb_intr, dev->udev,
3789 dev->pipe_intr, buf, maxp,
3790 intr_complete, dev, period);
3794 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3796 /* driver requires remote-wakeup capability during autosuspend. */
3797 intf->needs_remote_wakeup = 1;
3799 ret = register_netdev(netdev);
3801 netif_err(dev, probe, netdev, "couldn't register the device\n");
3805 usb_set_intfdata(intf, dev);
3807 ret = device_set_wakeup_enable(&udev->dev, true);
3809 /* Default delay of 2sec has more overhead than advantage.
3810 * Set to 10sec as default.
3812 pm_runtime_set_autosuspend_delay(&udev->dev,
3813 DEFAULT_AUTOSUSPEND_DELAY);
3815 ret = lan78xx_phy_init(dev);
3822 unregister_netdev(netdev);
3824 lan78xx_unbind(dev, intf);
3826 free_netdev(netdev);
/* Bitwise CRC-16 (polynomial 0x8005) over a wake-frame pattern, used
 * to program the WUF_CFG registers.  Inner shift/xor lines are elided
 * in this excerpt.
 */
3833 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3835 const u16 crc16poly = 0x8005;
3841 for (i = 0; i < len; i++) {
3843 for (bit = 0; bit < 8; bit++) {
3847 if (msb ^ (u16)(data & 1)) {
3849 crc |= (u16)0x0001U;
/* Program wake-on-LAN hardware state for system suspend.
 * Disables TX/RX, clears all wake sources, then for each requested WoL
 * mode (PHY link, magic packet, broadcast, multicast, unicast, ARP)
 * sets the matching WUCSR bits and, where needed, a WUF_CFG wake-frame
 * filter keyed by a CRC-16 over the pattern below.  PMT_CTL suspend
 * mode 0 or 3 is selected per mode; multiple modes force mode 0.
 * Finally clears wake-status bits and re-enables the receiver.
 */
3858 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3866 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3867 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3868 const u8 arp_type[2] = { 0x08, 0x06 };
/* Quiesce the MAC before reprogramming wake state. */
3870 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3871 buf &= ~MAC_TX_TXEN_;
3872 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3873 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3874 buf &= ~MAC_RX_RXEN_;
3875 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3877 ret = lan78xx_write_reg(dev, WUCSR, 0);
3878 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3879 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3884 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3885 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3886 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3888 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3889 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3892 if (wol & WAKE_PHY) {
3893 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3895 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3896 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3897 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3899 if (wol & WAKE_MAGIC) {
3900 temp_wucsr |= WUCSR_MPEN_;
3902 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3903 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3904 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3906 if (wol & WAKE_BCAST) {
3907 temp_wucsr |= WUCSR_BCST_EN_;
3909 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3910 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3911 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3913 if (wol & WAKE_MCAST) {
3914 temp_wucsr |= WUCSR_WAKE_EN_;
3916 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3917 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3918 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3920 WUF_CFGX_TYPE_MCAST_ |
3921 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3922 (crc & WUF_CFGX_CRC16_MASK_));
/* Mask 0x7 = match the first 3 bytes of the destination address. */
3924 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3925 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3926 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3927 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3930 /* for IPv6 Multicast */
3931 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3932 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3934 WUF_CFGX_TYPE_MCAST_ |
3935 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3936 (crc & WUF_CFGX_CRC16_MASK_));
3938 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3939 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3940 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3941 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3944 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3945 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3946 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3948 if (wol & WAKE_UCAST) {
3949 temp_wucsr |= WUCSR_PFDA_EN_;
3951 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3952 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3953 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3955 if (wol & WAKE_ARP) {
3956 temp_wucsr |= WUCSR_WAKE_EN_;
3958 /* set WUF_CFG & WUF_MASK
3959 * for packettype (offset 12,13) = ARP (0x0806)
3961 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3962 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3964 WUF_CFGX_TYPE_ALL_ |
3965 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3966 (crc & WUF_CFGX_CRC16_MASK_));
/* Mask 0x3000 = bytes 12-13 of the frame (the EtherType field). */
3968 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3969 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3970 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3971 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3974 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3975 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3976 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3979 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3981 /* when multiple WOL bits are set */
3982 if (hweight_long((unsigned long)wol) > 1) {
3983 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3984 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3985 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3987 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
/* Clear any latched wake-up status, then re-enable the receiver so
 * wake frames can actually be seen while suspended.
 */
3990 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3991 buf |= PMT_CTL_WUPS_MASK_;
3992 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3994 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3995 buf |= MAC_RX_RXEN_;
3996 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* USB suspend handler.  On the first suspend (suspend_count 0 -> 1):
 * refuses autosuspend while TX work is queued, marks the device asleep,
 * disables the MAC, detaches the netdev and kills all URBs.  Then, if
 * asleep: for autosuspend, arms "good frame"/PHY wake via WUCSR and
 * PMT_CTL suspend mode 3; for system suspend, delegates WoL setup to
 * lan78xx_set_suspend with the user-configured pdata->wol modes.
 */
4001 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
4003 struct lan78xx_net *dev = usb_get_intfdata(intf);
4004 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
4009 event = message.event;
4011 if (!dev->suspend_count++) {
4012 spin_lock_irq(&dev->txq.lock);
4013 /* don't autosuspend while transmitting */
4014 if ((skb_queue_len(&dev->txq) ||
4015 skb_queue_len(&dev->txq_pend)) &&
4016 PMSG_IS_AUTO(message)) {
4017 spin_unlock_irq(&dev->txq.lock);
4021 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4022 spin_unlock_irq(&dev->txq.lock);
4026 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4027 buf &= ~MAC_TX_TXEN_;
4028 ret = lan78xx_write_reg(dev, MAC_TX, buf);
4029 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4030 buf &= ~MAC_RX_RXEN_;
4031 ret = lan78xx_write_reg(dev, MAC_RX, buf);
4033 /* empty out the rx and queues */
4034 netif_device_detach(dev->net);
4035 lan78xx_terminate_urbs(dev);
4036 usb_kill_urb(dev->urb_intr);
4039 netif_device_attach(dev->net);
4042 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4043 del_timer(&dev->stat_monitor);
4045 if (PMSG_IS_AUTO(message)) {
4046 /* auto suspend (selective suspend) */
4047 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4048 buf &= ~MAC_TX_TXEN_;
4049 ret = lan78xx_write_reg(dev, MAC_TX, buf);
4050 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4051 buf &= ~MAC_RX_RXEN_;
4052 ret = lan78xx_write_reg(dev, MAC_RX, buf);
4054 ret = lan78xx_write_reg(dev, WUCSR, 0);
4055 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4056 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4058 /* set goodframe wakeup */
4059 ret = lan78xx_read_reg(dev, WUCSR, &buf);
4061 buf |= WUCSR_RFE_WAKE_EN_;
4062 buf |= WUCSR_STORE_WAKE_;
4064 ret = lan78xx_write_reg(dev, WUCSR, buf);
4066 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4068 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4069 buf |= PMT_CTL_RES_CLR_WKP_STS_;
4071 buf |= PMT_CTL_PHY_WAKE_EN_;
4072 buf |= PMT_CTL_WOL_EN_;
4073 buf &= ~PMT_CTL_SUS_MODE_MASK_;
4074 buf |= PMT_CTL_SUS_MODE_3_;
4076 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4078 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4080 buf |= PMT_CTL_WUPS_MASK_;
4082 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* Receiver must stay on for wake frames during selective suspend. */
4084 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4085 buf |= MAC_RX_RXEN_;
4086 ret = lan78xx_write_reg(dev, MAC_RX, buf);
4088 lan78xx_set_suspend(dev, pdata->wol);
/* USB resume handler.  Rearms the stats timer, and on the final nested
 * resume (suspend_count back to 0): resubmits the interrupt URB,
 * flushes the TX URBs anchored on dev->deferred while asleep (under
 * the txq lock), clears the asleep flag, restarts the queue/tasklet,
 * clears all wake-source state in WUCSR/WUCSR2/WK_SRC (then enables
 * the EEE/NS/TCP-SYN wake detectors), and re-enables the transmitter.
 */
4097 static int lan78xx_resume(struct usb_interface *intf)
4099 struct lan78xx_net *dev = usb_get_intfdata(intf);
4100 struct sk_buff *skb;
4105 if (!timer_pending(&dev->stat_monitor)) {
4107 mod_timer(&dev->stat_monitor,
4108 jiffies + STAT_UPDATE_TIMER);
4111 if (!--dev->suspend_count) {
4112 /* resume interrupt URBs */
4113 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
4114 usb_submit_urb(dev->urb_intr, GFP_NOIO);
4116 spin_lock_irq(&dev->txq.lock);
4117 while ((res = usb_get_from_anchor(&dev->deferred))) {
4118 skb = (struct sk_buff *)res->context;
4119 ret = usb_submit_urb(res, GFP_ATOMIC);
/* Submit failed: drop the frame and its PM reference. */
4121 dev_kfree_skb_any(skb);
4123 usb_autopm_put_interface_async(dev->intf);
4125 netif_trans_update(dev->net);
4126 lan78xx_queue_skb(&dev->txq, skb, tx_start);
4130 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4131 spin_unlock_irq(&dev->txq.lock);
4133 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
4134 if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
4135 netif_start_queue(dev->net);
4136 tasklet_schedule(&dev->bh);
4140 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4141 ret = lan78xx_write_reg(dev, WUCSR, 0);
4142 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4144 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4146 WUCSR2_IPV6_TCPSYN_RCD_ |
4147 WUCSR2_IPV4_TCPSYN_RCD_);
4149 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
4150 WUCSR_EEE_RX_WAKE_ |
4152 WUCSR_RFE_WAKE_FR_ |
4157 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4158 buf |= MAC_TX_TXEN_;
4159 ret = lan78xx_write_reg(dev, MAC_TX, buf);
/*
 * lan78xx_reset_resume - resume after the device lost power or was reset.
 *
 * Restarts the PHY state machine and then falls through to the normal
 * resume path.
 *
 * NOTE(review): excerpt — the upstream driver also re-runs the full chip
 * initialization (lan78xx_reset()) before phy_start() here; that line is
 * not visible in this chunk, confirm against the full source.
 */
4164 static int lan78xx_reset_resume(struct usb_interface *intf)
4166 struct lan78xx_net *dev = usb_get_intfdata(intf);
4170 phy_start(dev->net->phydev);
4172 return lan78xx_resume(intf);
/*
 * USB vendor/product ID match table: the three supported Microchip
 * devices (LAN7800, LAN7850, LAN7801).
 */
4175 static const struct usb_device_id products[] = {
4177 /* LAN7800 USB Gigabit Ethernet Device */
4178 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4181 /* LAN7850 USB Gigabit Ethernet Device */
4182 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4185 /* LAN7801 USB Gigabit Ethernet Device */
4186 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
/* Export the ID table so userspace hotplug can auto-load this module. */
4190 MODULE_DEVICE_TABLE(usb, products);
/*
 * USB driver glue. Autosuspend is supported; hub-initiated Link Power
 * Management is disabled via disable_hub_initiated_lpm.
 */
4192 static struct usb_driver lan78xx_driver = {
4193 .name = DRIVER_NAME,
4194 .id_table = products,
4195 .probe = lan78xx_probe,
4196 .disconnect = lan78xx_disconnect,
4197 .suspend = lan78xx_suspend,
4198 .resume = lan78xx_resume,
/* Invoked instead of .resume when the device was reset while suspended. */
4199 .reset_resume = lan78xx_reset_resume,
4200 .supports_autosuspend = 1,
4201 .disable_hub_initiated_lpm = 1,
/* Generates module init/exit that register/deregister the driver. */
4204 module_usb_driver(lan78xx_driver);
/* Module metadata (author/description strings defined near top of file). */
4206 MODULE_AUTHOR(DRIVER_AUTHOR);
4207 MODULE_DESCRIPTION(DRIVER_DESC);
4208 MODULE_LICENSE("GPL");