drivers/net/usb/lan78xx.c
1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <linux/phy.h>
33 #include <net/ip6_checksum.h>
34 #include <linux/interrupt.h>
35 #include <linux/irqdomain.h>
36 #include <linux/irq.h>
37 #include <linux/irqchip/chained_irq.h>
38 #include <linux/microchipphy.h>
39 #include <linux/phy_fixed.h>
40 #include <linux/of_mdio.h>
41 #include <linux/of_net.h>
42 #include "lan78xx.h"
43
44 #define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
45 #define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
46 #define DRIVER_NAME     "lan78xx"
47
48 #define TX_TIMEOUT_JIFFIES              (5 * HZ)
49 #define THROTTLE_JIFFIES                (HZ / 8)
50 #define UNLINK_TIMEOUT_MS               3
51
52 #define RX_MAX_QUEUE_MEMORY             (60 * 1518)
53
54 #define SS_USB_PKT_SIZE                 (1024)
55 #define HS_USB_PKT_SIZE                 (512)
56 #define FS_USB_PKT_SIZE                 (64)
57
58 #define MAX_RX_FIFO_SIZE                (12 * 1024)
59 #define MAX_TX_FIFO_SIZE                (12 * 1024)
60 #define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
61 #define DEFAULT_BULK_IN_DELAY           (0x0800)
62 #define MAX_SINGLE_PACKET_SIZE          (9000)
63 #define DEFAULT_TX_CSUM_ENABLE          (true)
64 #define DEFAULT_RX_CSUM_ENABLE          (true)
65 #define DEFAULT_TSO_CSUM_ENABLE         (true)
66 #define DEFAULT_VLAN_FILTER_ENABLE      (true)
67 #define DEFAULT_VLAN_RX_OFFLOAD         (true)
68 #define TX_OVERHEAD                     (8)
69 #define RXW_PADDING                     2
70
71 #define LAN78XX_USB_VENDOR_ID           (0x0424)
72 #define LAN7800_USB_PRODUCT_ID          (0x7800)
73 #define LAN7850_USB_PRODUCT_ID          (0x7850)
74 #define LAN7801_USB_PRODUCT_ID          (0x7801)
75 #define LAN78XX_EEPROM_MAGIC            (0x78A5)
76 #define LAN78XX_OTP_MAGIC               (0x78F3)
77
78 #define MII_READ                        1
79 #define MII_WRITE                       0
80
81 #define EEPROM_INDICATOR                (0xA5)
82 #define EEPROM_MAC_OFFSET               (0x01)
83 #define MAX_EEPROM_SIZE                 512
84 #define OTP_INDICATOR_1                 (0xF3)
85 #define OTP_INDICATOR_2                 (0xF7)
86
87 #define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
88                                          WAKE_MCAST | WAKE_BCAST | \
89                                          WAKE_ARP | WAKE_MAGIC)
90
91 /* USB related defines */
92 #define BULK_IN_PIPE                    1
93 #define BULK_OUT_PIPE                   2
94
95 /* default autosuspend delay (mSec) */
96 #define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
97
98 /* statistics update interval (mSec) */
99 #define STAT_UPDATE_TIMER               (1 * 1000)
100
101 /* interrupt sources reported by the interrupt endpoint */
102 #define MAX_INT_EP                      (32)
103 #define INT_EP_INTEP                    (31)
104 #define INT_EP_OTP_WR_DONE              (28)
105 #define INT_EP_EEE_TX_LPI_START         (26)
106 #define INT_EP_EEE_TX_LPI_STOP          (25)
107 #define INT_EP_EEE_RX_LPI               (24)
108 #define INT_EP_MAC_RESET_TIMEOUT        (23)
109 #define INT_EP_RDFO                     (22)
110 #define INT_EP_TXE                      (21)
111 #define INT_EP_USB_STATUS               (20)
112 #define INT_EP_TX_DIS                   (19)
113 #define INT_EP_RX_DIS                   (18)
114 #define INT_EP_PHY                      (17)
115 #define INT_EP_DP                       (16)
116 #define INT_EP_MAC_ERR                  (15)
117 #define INT_EP_TDFU                     (14)
118 #define INT_EP_TDFO                     (13)
119 #define INT_EP_UTX                      (12)
120 #define INT_EP_GPIO_11                  (11)
121 #define INT_EP_GPIO_10                  (10)
122 #define INT_EP_GPIO_9                   (9)
123 #define INT_EP_GPIO_8                   (8)
124 #define INT_EP_GPIO_7                   (7)
125 #define INT_EP_GPIO_6                   (6)
126 #define INT_EP_GPIO_5                   (5)
127 #define INT_EP_GPIO_4                   (4)
128 #define INT_EP_GPIO_3                   (3)
129 #define INT_EP_GPIO_2                   (2)
130 #define INT_EP_GPIO_1                   (1)
131 #define INT_EP_GPIO_0                   (0)
132
133 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
134         "RX FCS Errors",
135         "RX Alignment Errors",
136         "RX Fragment Errors",
137         "RX Jabber Errors",
138         "RX Undersize Frame Errors",
139         "RX Oversize Frame Errors",
140         "RX Dropped Frames",
141         "RX Unicast Byte Count",
142         "RX Broadcast Byte Count",
143         "RX Multicast Byte Count",
144         "RX Unicast Frames",
145         "RX Broadcast Frames",
146         "RX Multicast Frames",
147         "RX Pause Frames",
148         "RX 64 Byte Frames",
149         "RX 65 - 127 Byte Frames",
150         "RX 128 - 255 Byte Frames",
151         "RX 256 - 511 Bytes Frames",
152         "RX 512 - 1023 Byte Frames",
153         "RX 1024 - 1518 Byte Frames",
154         "RX Greater 1518 Byte Frames",
155         "EEE RX LPI Transitions",
156         "EEE RX LPI Time",
157         "TX FCS Errors",
158         "TX Excess Deferral Errors",
159         "TX Carrier Errors",
160         "TX Bad Byte Count",
161         "TX Single Collisions",
162         "TX Multiple Collisions",
163         "TX Excessive Collision",
164         "TX Late Collisions",
165         "TX Unicast Byte Count",
166         "TX Broadcast Byte Count",
167         "TX Multicast Byte Count",
168         "TX Unicast Frames",
169         "TX Broadcast Frames",
170         "TX Multicast Frames",
171         "TX Pause Frames",
172         "TX 64 Byte Frames",
173         "TX 65 - 127 Byte Frames",
174         "TX 128 - 255 Byte Frames",
175         "TX 256 - 511 Bytes Frames",
176         "TX 512 - 1023 Byte Frames",
177         "TX 1024 - 1518 Byte Frames",
178         "TX Greater 1518 Byte Frames",
179         "EEE TX LPI Transitions",
180         "EEE TX LPI Time",
181 };
182
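/* 32-bit hardware statistics counters, in the order returned by
 * USB_VENDOR_REQUEST_GET_STATS and listed in lan78xx_gstrings above.
 */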
183 struct lan78xx_statstage {
184         u32 rx_fcs_errors;
185         u32 rx_alignment_errors;
186         u32 rx_fragment_errors;
187         u32 rx_jabber_errors;
188         u32 rx_undersize_frame_errors;
189         u32 rx_oversize_frame_errors;
190         u32 rx_dropped_frames;
191         u32 rx_unicast_byte_count;
192         u32 rx_broadcast_byte_count;
193         u32 rx_multicast_byte_count;
194         u32 rx_unicast_frames;
195         u32 rx_broadcast_frames;
196         u32 rx_multicast_frames;
197         u32 rx_pause_frames;
198         u32 rx_64_byte_frames;
199         u32 rx_65_127_byte_frames;
200         u32 rx_128_255_byte_frames;
201         u32 rx_256_511_bytes_frames;
202         u32 rx_512_1023_byte_frames;
203         u32 rx_1024_1518_byte_frames;
204         u32 rx_greater_1518_byte_frames;
205         u32 eee_rx_lpi_transitions;
206         u32 eee_rx_lpi_time;
207         u32 tx_fcs_errors;
208         u32 tx_excess_deferral_errors;
209         u32 tx_carrier_errors;
210         u32 tx_bad_byte_count;
211         u32 tx_single_collisions;
212         u32 tx_multiple_collisions;
213         u32 tx_excessive_collision;
214         u32 tx_late_collisions;
215         u32 tx_unicast_byte_count;
216         u32 tx_broadcast_byte_count;
217         u32 tx_multicast_byte_count;
218         u32 tx_unicast_frames;
219         u32 tx_broadcast_frames;
220         u32 tx_multicast_frames;
221         u32 tx_pause_frames;
222         u32 tx_64_byte_frames;
223         u32 tx_65_127_byte_frames;
224         u32 tx_128_255_byte_frames;
225         u32 tx_256_511_bytes_frames;
226         u32 tx_512_1023_byte_frames;
227         u32 tx_1024_1518_byte_frames;
228         u32 tx_greater_1518_byte_frames;
229         u32 eee_tx_lpi_transitions;
230         u32 eee_tx_lpi_time;
231 };
232
233 struct lan78xx_statstage64 {
234         u64 rx_fcs_errors;
235         u64 rx_alignment_errors;
236         u64 rx_fragment_errors;
237         u64 rx_jabber_errors;
238         u64 rx_undersize_frame_errors;
239         u64 rx_oversize_frame_errors;
240         u64 rx_dropped_frames;
241         u64 rx_unicast_byte_count;
242         u64 rx_broadcast_byte_count;
243         u64 rx_multicast_byte_count;
244         u64 rx_unicast_frames;
245         u64 rx_broadcast_frames;
246         u64 rx_multicast_frames;
247         u64 rx_pause_frames;
248         u64 rx_64_byte_frames;
249         u64 rx_65_127_byte_frames;
250         u64 rx_128_255_byte_frames;
251         u64 rx_256_511_bytes_frames;
252         u64 rx_512_1023_byte_frames;
253         u64 rx_1024_1518_byte_frames;
254         u64 rx_greater_1518_byte_frames;
255         u64 eee_rx_lpi_transitions;
256         u64 eee_rx_lpi_time;
257         u64 tx_fcs_errors;
258         u64 tx_excess_deferral_errors;
259         u64 tx_carrier_errors;
260         u64 tx_bad_byte_count;
261         u64 tx_single_collisions;
262         u64 tx_multiple_collisions;
263         u64 tx_excessive_collision;
264         u64 tx_late_collisions;
265         u64 tx_unicast_byte_count;
266         u64 tx_broadcast_byte_count;
267         u64 tx_multicast_byte_count;
268         u64 tx_unicast_frames;
269         u64 tx_broadcast_frames;
270         u64 tx_multicast_frames;
271         u64 tx_pause_frames;
272         u64 tx_64_byte_frames;
273         u64 tx_65_127_byte_frames;
274         u64 tx_128_255_byte_frames;
275         u64 tx_256_511_bytes_frames;
276         u64 tx_512_1023_byte_frames;
277         u64 tx_1024_1518_byte_frames;
278         u64 tx_greater_1518_byte_frames;
279         u64 eee_tx_lpi_transitions;
280         u64 eee_tx_lpi_time;
281 };
282
283 static u32 lan78xx_regs[] = {
284         ID_REV,
285         INT_STS,
286         HW_CFG,
287         PMT_CTL,
288         E2P_CMD,
289         E2P_DATA,
290         USB_STATUS,
291         VLAN_TYPE,
292         MAC_CR,
293         MAC_RX,
294         MAC_TX,
295         FLOW,
296         ERR_STS,
297         MII_ACC,
298         MII_DATA,
299         EEE_TX_LPI_REQ_DLY,
300         EEE_TW_TX_SYS,
301         EEE_TX_LPI_REM_DLY,
302         WUCSR
303 };
304
305 #define PHY_REG_SIZE (32 * sizeof(u32))
306
307 struct lan78xx_net;
308
309 struct lan78xx_priv {
310         struct lan78xx_net *dev;
311         u32 rfe_ctl;
312         u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
313         u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
314         u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
315         struct mutex dataport_mutex; /* for dataport access */
316         spinlock_t rfe_ctl_lock; /* for rfe register access */
317         struct work_struct set_multicast;
318         struct work_struct set_vlan;
319         u32 wol;
320 };
321
322 enum skb_state {
323         illegal = 0,
324         tx_start,
325         tx_done,
326         rx_start,
327         rx_done,
328         rx_cleanup,
329         unlink_start
330 };
331
332 struct skb_data {               /* skb->cb is one of these */
333         struct urb *urb;
334         struct lan78xx_net *dev;
335         enum skb_state state;
336         size_t length;
337         int num_of_packet;
338 };
339
340 struct usb_context {
341         struct usb_ctrlrequest req;
342         struct lan78xx_net *dev;
343 };
344
345 #define EVENT_TX_HALT                   0
346 #define EVENT_RX_HALT                   1
347 #define EVENT_RX_MEMORY                 2
348 #define EVENT_STS_SPLIT                 3
349 #define EVENT_LINK_RESET                4
350 #define EVENT_RX_PAUSED                 5
351 #define EVENT_DEV_WAKING                6
352 #define EVENT_DEV_ASLEEP                7
353 #define EVENT_DEV_OPEN                  8
354 #define EVENT_STAT_UPDATE               9
355
356 struct statstage {
357         struct mutex                    access_lock;    /* for stats access */
358         struct lan78xx_statstage        saved;
359         struct lan78xx_statstage        rollover_count;
360         struct lan78xx_statstage        rollover_max;
361         struct lan78xx_statstage64      curr_stat;
362 };
363
364 struct irq_domain_data {
365         struct irq_domain       *irqdomain;
366         unsigned int            phyirq;
367         struct irq_chip         *irqchip;
368         irq_flow_handler_t      irq_handler;
369         u32                     irqenable;
370         struct mutex            irq_lock;               /* for irq bus access */
371 };
372
373 struct lan78xx_net {
374         struct net_device       *net;
375         struct usb_device       *udev;
376         struct usb_interface    *intf;
377         void                    *driver_priv;
378
379         int                     rx_qlen;
380         int                     tx_qlen;
381         struct sk_buff_head     rxq;
382         struct sk_buff_head     txq;
383         struct sk_buff_head     done;
384         struct sk_buff_head     rxq_pause;
385         struct sk_buff_head     txq_pend;
386
387         struct tasklet_struct   bh;
388         struct delayed_work     wq;
389
390         struct usb_host_endpoint *ep_blkin;
391         struct usb_host_endpoint *ep_blkout;
392         struct usb_host_endpoint *ep_intr;
393
394         int                     msg_enable;
395
396         struct urb              *urb_intr;
397         struct usb_anchor       deferred;
398
399         struct mutex            phy_mutex; /* for phy access */
400         unsigned                pipe_in, pipe_out, pipe_intr;
401
402         u32                     hard_mtu;       /* count any extra framing */
403         size_t                  rx_urb_size;    /* size for rx urbs */
404
405         unsigned long           flags;
406
407         wait_queue_head_t       *wait;
408         unsigned char           suspend_count;
409
410         unsigned                maxpacket;
411         struct timer_list       delay;
412         struct timer_list       stat_monitor;
413
414         unsigned long           data[5];
415
416         int                     link_on;
417         u8                      mdix_ctrl;
418
419         u32                     chipid;
420         u32                     chiprev;
421         struct mii_bus          *mdiobus;
422         phy_interface_t         interface;
423
424         int                     fc_autoneg;
425         u8                      fc_request_control;
426
427         int                     delta;
428         struct statstage        stats;
429
430         struct irq_domain_data  domain_data;
431 };
432
433 /* external PHY IDs */
434 #define PHY_LAN8835                     (0x0007C130)
435 #define PHY_KSZ9031RNX                  (0x00221620)
436
437 /* use ethtool to change the level for any given device */
438 static int msg_level = -1;
439 module_param(msg_level, int, 0);
440 MODULE_PARM_DESC(msg_level, "Override default message level");
441
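/* Read a 32-bit device register with a vendor-specific USB control transfer.
 * On success the value is converted from little endian and stored in *data;
 * the usb_control_msg() result is returned (negative on error).
 */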
442 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
443 {
444         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
445         int ret;
446
447         if (!buf)
448                 return -ENOMEM;
449
450         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
451                               USB_VENDOR_REQUEST_READ_REGISTER,
452                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
453                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
454         if (likely(ret >= 0)) {
455                 le32_to_cpus(buf);
456                 *data = *buf;
457         } else {
458                 netdev_warn(dev->net,
459                             "Failed to read register index 0x%08x. ret = %d",
460                             index, ret);
461         }
462
463         kfree(buf);
464
465         return ret;
466 }
467
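/* Write a 32-bit device register with a vendor-specific USB control transfer.
 * The value is converted to little endian before it is sent; the
 * usb_control_msg() result is returned (negative on error).
 */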
468 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
469 {
470         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
471         int ret;
472
473         if (!buf)
474                 return -ENOMEM;
475
476         *buf = data;
477         cpu_to_le32s(buf);
478
479         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
480                               USB_VENDOR_REQUEST_WRITE_REGISTER,
481                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
482                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
483         if (unlikely(ret < 0)) {
484                 netdev_warn(dev->net,
485                             "Failed to write register index 0x%08x. ret = %d",
486                             index, ret);
487         }
488
489         kfree(buf);
490
491         return ret;
492 }
493
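/* Fetch the whole hardware statistics block over the control endpoint and
 * copy it into *data, converting each 32-bit counter from little endian.
 */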
494 static int lan78xx_read_stats(struct lan78xx_net *dev,
495                               struct lan78xx_statstage *data)
496 {
497         int ret = 0;
498         int i;
499         struct lan78xx_statstage *stats;
500         u32 *src;
501         u32 *dst;
502
503         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
504         if (!stats)
505                 return -ENOMEM;
506
507         ret = usb_control_msg(dev->udev,
508                               usb_rcvctrlpipe(dev->udev, 0),
509                               USB_VENDOR_REQUEST_GET_STATS,
510                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
511                               0,
512                               0,
513                               (void *)stats,
514                               sizeof(*stats),
515                               USB_CTRL_SET_TIMEOUT);
516         if (likely(ret >= 0)) {
517                 src = (u32 *)stats;
518                 dst = (u32 *)data;
519                 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
520                         le32_to_cpus(&src[i]);
521                         dst[i] = src[i];
522                 }
523         } else {
524                 netdev_warn(dev->net,
525                             "Failed to read stat ret = 0x%x", ret);
526         }
527
528         kfree(stats);
529
530         return ret;
531 }
532
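/* The hardware counters are 32 bits wide: if a freshly read value is lower
 * than the value saved on the previous read, the counter has wrapped, so
 * bump that counter's rollover count.
 */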
533 #define check_counter_rollover(struct1, dev_stats, member) {    \
534         if (struct1->member < dev_stats.saved.member)           \
535                 dev_stats.rollover_count.member++;              \
536         }
537
538 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
539                                         struct lan78xx_statstage *stats)
540 {
541         check_counter_rollover(stats, dev->stats, rx_fcs_errors);
542         check_counter_rollover(stats, dev->stats, rx_alignment_errors);
543         check_counter_rollover(stats, dev->stats, rx_fragment_errors);
544         check_counter_rollover(stats, dev->stats, rx_jabber_errors);
545         check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
546         check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
547         check_counter_rollover(stats, dev->stats, rx_dropped_frames);
548         check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
549         check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
550         check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
551         check_counter_rollover(stats, dev->stats, rx_unicast_frames);
552         check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
553         check_counter_rollover(stats, dev->stats, rx_multicast_frames);
554         check_counter_rollover(stats, dev->stats, rx_pause_frames);
555         check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
556         check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
557         check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
558         check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
559         check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
560         check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
561         check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
562         check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
563         check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
564         check_counter_rollover(stats, dev->stats, tx_fcs_errors);
565         check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
566         check_counter_rollover(stats, dev->stats, tx_carrier_errors);
567         check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
568         check_counter_rollover(stats, dev->stats, tx_single_collisions);
569         check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
570         check_counter_rollover(stats, dev->stats, tx_excessive_collision);
571         check_counter_rollover(stats, dev->stats, tx_late_collisions);
572         check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
573         check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
574         check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
575         check_counter_rollover(stats, dev->stats, tx_unicast_frames);
576         check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
577         check_counter_rollover(stats, dev->stats, tx_multicast_frames);
578         check_counter_rollover(stats, dev->stats, tx_pause_frames);
579         check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
580         check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
581         check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
582         check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
583         check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
584         check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
585         check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
586         check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
587         check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
588
589         memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
590 }
591
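/* Refresh dev->stats.curr_stat: read the hardware counters, detect rollover,
 * and accumulate 64-bit totals as raw value + rollovers * (rollover_max + 1).
 */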
592 static void lan78xx_update_stats(struct lan78xx_net *dev)
593 {
594         u32 *p, *count, *max;
595         u64 *data;
596         int i;
597         struct lan78xx_statstage lan78xx_stats;
598
599         if (usb_autopm_get_interface(dev->intf) < 0)
600                 return;
601
602         p = (u32 *)&lan78xx_stats;
603         count = (u32 *)&dev->stats.rollover_count;
604         max = (u32 *)&dev->stats.rollover_max;
605         data = (u64 *)&dev->stats.curr_stat;
606
607         mutex_lock(&dev->stats.access_lock);
608
609         if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
610                 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
611
612         for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
613                 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
614
615         mutex_unlock(&dev->stats.access_lock);
616
617         usb_autopm_put_interface(dev->intf);
618 }
619
620 /* Loop until the read completes or times out; called with phy_mutex held */
621 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
622 {
623         unsigned long start_time = jiffies;
624         u32 val;
625         int ret;
626
627         do {
628                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
629                 if (unlikely(ret < 0))
630                         return -EIO;
631
632                 if (!(val & MII_ACC_MII_BUSY_))
633                         return 0;
634         } while (!time_after(jiffies, start_time + HZ));
635
636         return -EIO;
637 }
638
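/* Build an MII_ACC command word for the given PHY address and register index;
 * for example, mii_access(addr, MII_BMCR, MII_READ) encodes a BMCR read for
 * the PHY at address addr.  The busy bit is set so the access starts as soon
 * as the word is written to MII_ACC.
 */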
639 static inline u32 mii_access(int id, int index, int read)
640 {
641         u32 ret;
642
643         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
644         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
645         if (read)
646                 ret |= MII_ACC_MII_READ_;
647         else
648                 ret |= MII_ACC_MII_WRITE_;
649         ret |= MII_ACC_MII_BUSY_;
650
651         return ret;
652 }
653
654 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
655 {
656         unsigned long start_time = jiffies;
657         u32 val;
658         int ret;
659
660         do {
661                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
662                 if (unlikely(ret < 0))
663                         return -EIO;
664
665                 if (!(val & E2P_CMD_EPC_BUSY_) ||
666                     (val & E2P_CMD_EPC_TIMEOUT_))
667                         break;
668                 usleep_range(40, 100);
669         } while (!time_after(jiffies, start_time + HZ));
670
671         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
672                 netdev_warn(dev->net, "EEPROM operation timeout");
673                 return -EIO;
674         }
675
676         return 0;
677 }
678
679 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
680 {
681         unsigned long start_time = jiffies;
682         u32 val;
683         int ret;
684
685         do {
686                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
687                 if (unlikely(ret < 0))
688                         return -EIO;
689
690                 if (!(val & E2P_CMD_EPC_BUSY_))
691                         return 0;
692
693                 usleep_range(40, 100);
694         } while (!time_after(jiffies, start_time + HZ));
695
696         netdev_warn(dev->net, "EEPROM is busy");
697         return -EIO;
698 }
699
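/* Read 'length' bytes from the external EEPROM starting at 'offset', one
 * E2P_CMD read command per byte.  On LAN7800 the LED outputs are disabled
 * for the duration because those pins are shared with the EEPROM interface.
 */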
700 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
701                                    u32 length, u8 *data)
702 {
703         u32 val;
704         u32 saved;
705         int i, ret;
706         int retval;
707
708         /* On some chips the EEPROM pins are muxed with the LED function;
709          * disable the LEDs to access the EEPROM and restore them afterwards.
710          */
711         ret = lan78xx_read_reg(dev, HW_CFG, &val);
712         saved = val;
713         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
714                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
715                 ret = lan78xx_write_reg(dev, HW_CFG, val);
716         }
717
718         retval = lan78xx_eeprom_confirm_not_busy(dev);
719         if (retval)
720                 return retval;
721
722         for (i = 0; i < length; i++) {
723                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
724                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
725                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
726                 if (unlikely(ret < 0)) {
727                         retval = -EIO;
728                         goto exit;
729                 }
730
731                 retval = lan78xx_wait_eeprom(dev);
732                 if (retval < 0)
733                         goto exit;
734
735                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
736                 if (unlikely(ret < 0)) {
737                         retval = -EIO;
738                         goto exit;
739                 }
740
741                 data[i] = val & 0xFF;
742                 offset++;
743         }
744
745         retval = 0;
746 exit:
747         if (dev->chipid == ID_REV_CHIP_ID_7800_)
748                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
749
750         return retval;
751 }
752
753 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
754                                u32 length, u8 *data)
755 {
756         u8 sig;
757         int ret;
758
759         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
760         if ((ret == 0) && (sig == EEPROM_INDICATOR))
761                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
762         else
763                 ret = -EINVAL;
764
765         return ret;
766 }
767
768 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
769                                     u32 length, u8 *data)
770 {
771         u32 val;
772         u32 saved;
773         int i, ret;
774         int retval;
775
776         /* On some chips the EEPROM pins are muxed with the LED function;
777          * disable the LEDs to access the EEPROM and restore them afterwards.
778          */
779         ret = lan78xx_read_reg(dev, HW_CFG, &val);
780         saved = val;
781         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
782                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
783                 ret = lan78xx_write_reg(dev, HW_CFG, val);
784         }
785
786         retval = lan78xx_eeprom_confirm_not_busy(dev);
787         if (retval)
788                 goto exit;
789
790         /* Issue write/erase enable command */
791         val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
792         ret = lan78xx_write_reg(dev, E2P_CMD, val);
793         if (unlikely(ret < 0)) {
794                 retval = -EIO;
795                 goto exit;
796         }
797
798         retval = lan78xx_wait_eeprom(dev);
799         if (retval < 0)
800                 goto exit;
801
802         for (i = 0; i < length; i++) {
803                 /* Fill data register */
804                 val = data[i];
805                 ret = lan78xx_write_reg(dev, E2P_DATA, val);
806                 if (ret < 0) {
807                         retval = -EIO;
808                         goto exit;
809                 }
810
811                 /* Send "write" command */
812                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
813                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
814                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
815                 if (ret < 0) {
816                         retval = -EIO;
817                         goto exit;
818                 }
819
820                 retval = lan78xx_wait_eeprom(dev);
821                 if (retval < 0)
822                         goto exit;
823
824                 offset++;
825         }
826
827         retval = 0;
828 exit:
829         if (dev->chipid == ID_REV_CHIP_ID_7800_)
830                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
831
832         return retval;
833 }
834
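/* Read 'length' bytes of on-chip OTP memory starting at 'offset'.  The OTP
 * block is powered up first if needed, then each byte is fetched with a read
 * command while polling OTP_STATUS for completion.
 */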
835 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
836                                 u32 length, u8 *data)
837 {
838         int i;
839         int ret;
840         u32 buf;
841         unsigned long timeout;
842
843         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
844
845         if (buf & OTP_PWR_DN_PWRDN_N_) {
846                 /* clear it and wait for it to clear */
847                 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
848
849                 timeout = jiffies + HZ;
850                 do {
851                         usleep_range(1, 10);
852                         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
853                         if (time_after(jiffies, timeout)) {
854                                 netdev_warn(dev->net,
855                                             "timeout on OTP_PWR_DN");
856                                 return -EIO;
857                         }
858                 } while (buf & OTP_PWR_DN_PWRDN_N_);
859         }
860
861         for (i = 0; i < length; i++) {
862                 ret = lan78xx_write_reg(dev, OTP_ADDR1,
863                                         ((offset + i) >> 8) & OTP_ADDR1_15_11);
864                 ret = lan78xx_write_reg(dev, OTP_ADDR2,
865                                         ((offset + i) & OTP_ADDR2_10_3));
866
867                 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
868                 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
869
870                 timeout = jiffies + HZ;
871                 do {
872                         udelay(1);
873                         ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
874                         if (time_after(jiffies, timeout)) {
875                                 netdev_warn(dev->net,
876                                             "timeout on OTP_STATUS");
877                                 return -EIO;
878                         }
879                 } while (buf & OTP_STATUS_BUSY_);
880
881                 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
882
883                 data[i] = (u8)(buf & 0xFF);
884         }
885
886         return 0;
887 }
888
889 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
890                                  u32 length, u8 *data)
891 {
892         int i;
893         int ret;
894         u32 buf;
895         unsigned long timeout;
896
897         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
898
899         if (buf & OTP_PWR_DN_PWRDN_N_) {
900                 /* clear it and wait for it to clear */
901                 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
902
903                 timeout = jiffies + HZ;
904                 do {
905                         udelay(1);
906                         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
907                         if (time_after(jiffies, timeout)) {
908                                 netdev_warn(dev->net,
909                                             "timeout on OTP_PWR_DN completion");
910                                 return -EIO;
911                         }
912                 } while (buf & OTP_PWR_DN_PWRDN_N_);
913         }
914
915         /* set to BYTE program mode */
916         ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
917
918         for (i = 0; i < length; i++) {
919                 ret = lan78xx_write_reg(dev, OTP_ADDR1,
920                                         ((offset + i) >> 8) & OTP_ADDR1_15_11);
921                 ret = lan78xx_write_reg(dev, OTP_ADDR2,
922                                         ((offset + i) & OTP_ADDR2_10_3));
923                 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
924                 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
925                 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
926
927                 timeout = jiffies + HZ;
928                 do {
929                         udelay(1);
930                         ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
931                         if (time_after(jiffies, timeout)) {
932                                 netdev_warn(dev->net,
933                                             "Timeout on OTP_STATUS completion");
934                                 return -EIO;
935                         }
936                 } while (buf & OTP_STATUS_BUSY_);
937         }
938
939         return 0;
940 }
941
942 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
943                             u32 length, u8 *data)
944 {
945         u8 sig;
946         int ret;
947
948         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
949
950         if (ret == 0) {
953                 if (sig == OTP_INDICATOR_2)
954                         offset += 0x100;
955                 else if (sig != OTP_INDICATOR_1)
956                         ret = -EINVAL;
957                 if (!ret)
958                         ret = lan78xx_read_raw_otp(dev, offset, length, data);
959         }
960
961         return ret;
962 }
963
964 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
965 {
966         int i, ret;
967
968         for (i = 0; i < 100; i++) {
969                 u32 dp_sel;
970
971                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
972                 if (unlikely(ret < 0))
973                         return -EIO;
974
975                 if (dp_sel & DP_SEL_DPRDY_)
976                         return 0;
977
978                 usleep_range(40, 100);
979         }
980
981         netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
982
983         return -EIO;
984 }
985
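/* Write 'length' words into the internal dataport RAM bank selected by
 * 'ram_select' (e.g. the VLAN/multicast hash filter), waiting for
 * DP_SEL_DPRDY_ after each word.  Sleeps, so it must not be called from
 * atomic context; serialized by dataport_mutex.
 */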
986 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
987                                   u32 addr, u32 length, u32 *buf)
988 {
989         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
990         u32 dp_sel;
991         int i, ret;
992
993         if (usb_autopm_get_interface(dev->intf) < 0)
994                 return 0;
995
996         mutex_lock(&pdata->dataport_mutex);
997
998         ret = lan78xx_dataport_wait_not_busy(dev);
999         if (ret < 0)
1000                 goto done;
1001
1002         ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1003
1004         dp_sel &= ~DP_SEL_RSEL_MASK_;
1005         dp_sel |= ram_select;
1006         ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
1007
1008         for (i = 0; i < length; i++) {
1009                 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
1010
1011                 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
1012
1013                 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1014
1015                 ret = lan78xx_dataport_wait_not_busy(dev);
1016                 if (ret < 0)
1017                         goto done;
1018         }
1019
1020 done:
1021         mutex_unlock(&pdata->dataport_mutex);
1022         usb_autopm_put_interface(dev->intf);
1023
1024         return ret;
1025 }
1026
1027 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1028                                     int index, u8 addr[ETH_ALEN])
1029 {
1030         u32     temp;
1031
1032         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1033                 temp = addr[3];
1034                 temp = addr[2] | (temp << 8);
1035                 temp = addr[1] | (temp << 8);
1036                 temp = addr[0] | (temp << 8);
1037                 pdata->pfilter_table[index][1] = temp;
1038                 temp = addr[5];
1039                 temp = addr[4] | (temp << 8);
1040                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1041                 pdata->pfilter_table[index][0] = temp;
1042         }
1043 }
1044
1045 /* returns hash bit number for given MAC address */
1046 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1047 {
1048         return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1049 }
1050
1051 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1052 {
1053         struct lan78xx_priv *pdata =
1054                         container_of(param, struct lan78xx_priv, set_multicast);
1055         struct lan78xx_net *dev = pdata->dev;
1056         int i;
1057         int ret;
1058
1059         netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1060                   pdata->rfe_ctl);
1061
1062         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1063                                DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1064
1065         for (i = 1; i < NUM_OF_MAF; i++) {
1066                 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1067                 ret = lan78xx_write_reg(dev, MAF_LO(i),
1068                                         pdata->pfilter_table[i][1]);
1069                 ret = lan78xx_write_reg(dev, MAF_HI(i),
1070                                         pdata->pfilter_table[i][0]);
1071         }
1072
1073         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1074 }
1075
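/* Rebuild the receive filter state (perfect filters, multicast hash and
 * rfe_ctl flags) under rfe_ctl_lock, then defer the actual register and
 * dataport writes to the set_multicast work item, since USB access sleeps.
 */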
1076 static void lan78xx_set_multicast(struct net_device *netdev)
1077 {
1078         struct lan78xx_net *dev = netdev_priv(netdev);
1079         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1080         unsigned long flags;
1081         int i;
1082
1083         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1084
1085         pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1086                             RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1087
1088         for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1089                 pdata->mchash_table[i] = 0;
1090         /* pfilter_table[0] holds the device's own HW address */
1091         for (i = 1; i < NUM_OF_MAF; i++) {
1092                 pdata->pfilter_table[i][0] =
1093                 pdata->pfilter_table[i][1] = 0;
1094         }
1095
1096         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1097
1098         if (dev->net->flags & IFF_PROMISC) {
1099                 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1100                 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1101         } else {
1102                 if (dev->net->flags & IFF_ALLMULTI) {
1103                         netif_dbg(dev, drv, dev->net,
1104                                   "receive all multicast enabled");
1105                         pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1106                 }
1107         }
1108
1109         if (netdev_mc_count(dev->net)) {
1110                 struct netdev_hw_addr *ha;
1111                 int i;
1112
1113                 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1114
1115                 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1116
1117                 i = 1;
1118                 netdev_for_each_mc_addr(ha, netdev) {
1119                         /* set first 32 into Perfect Filter */
1120                         if (i < 33) {
1121                                 lan78xx_set_addr_filter(pdata, i, ha->addr);
1122                         } else {
1123                                 u32 bitnum = lan78xx_hash(ha->addr);
1124
1125                                 pdata->mchash_table[bitnum / 32] |=
1126                                                         (1 << (bitnum % 32));
1127                                 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1128                         }
1129                         i++;
1130                 }
1131         }
1132
1133         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1134
1135         /* defer register writes to a sleepable context */
1136         schedule_work(&pdata->set_multicast);
1137 }
1138
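/* Program MAC flow control from the resolved pause capabilities: either the
 * autonegotiated result of the local/remote advertisements or the manually
 * requested settings, plus a USB-speed dependent FCT_FLOW threshold.
 */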
1139 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1140                                       u16 lcladv, u16 rmtadv)
1141 {
1142         u32 flow = 0, fct_flow = 0;
1143         int ret;
1144         u8 cap;
1145
1146         if (dev->fc_autoneg)
1147                 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1148         else
1149                 cap = dev->fc_request_control;
1150
1151         if (cap & FLOW_CTRL_TX)
1152                 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1153
1154         if (cap & FLOW_CTRL_RX)
1155                 flow |= FLOW_CR_RX_FCEN_;
1156
1157         if (dev->udev->speed == USB_SPEED_SUPER)
1158                 fct_flow = 0x817;
1159         else if (dev->udev->speed == USB_SPEED_HIGH)
1160                 fct_flow = 0x211;
1161
1162         netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1163                   (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1164                   (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1165
1166         ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1167
1168         /* threshold value should be set before enabling flow */
1169         ret = lan78xx_write_reg(dev, FLOW, flow);
1170
1171         return 0;
1172 }
1173
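/* Handle a PHY link change: on link loss reset the MAC and stop the stats
 * timer; on link up tune the USB3 U1/U2 states for the negotiated speed,
 * update flow control and restart the statistics monitor.
 */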
1174 static int lan78xx_link_reset(struct lan78xx_net *dev)
1175 {
1176         struct phy_device *phydev = dev->net->phydev;
1177         struct ethtool_link_ksettings ecmd;
1178         int ladv, radv, ret;
1179         u32 buf;
1180
1181         /* clear LAN78xx interrupt status */
1182         ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1183         if (unlikely(ret < 0))
1184                 return -EIO;
1185
1186         phy_read_status(phydev);
1187
1188         if (!phydev->link && dev->link_on) {
1189                 dev->link_on = false;
1190
1191                 /* reset MAC */
1192                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1193                 if (unlikely(ret < 0))
1194                         return -EIO;
1195                 buf |= MAC_CR_RST_;
1196                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1197                 if (unlikely(ret < 0))
1198                         return -EIO;
1199
1200                 del_timer(&dev->stat_monitor);
1201         } else if (phydev->link && !dev->link_on) {
1202                 dev->link_on = true;
1203
1204                 phy_ethtool_ksettings_get(phydev, &ecmd);
1205
1206                 if (dev->udev->speed == USB_SPEED_SUPER) {
1207                         if (ecmd.base.speed == 1000) {
1208                                 /* disable U2 */
1209                                 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1210                                 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1211                                 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1212                                 /* enable U1 */
1213                                 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1214                                 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1215                                 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1216                         } else {
1217                                 /* enable U1 & U2 */
1218                                 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1219                                 buf |= USB_CFG1_DEV_U2_INIT_EN_;
1220                                 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1221                                 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1222                         }
1223                 }
1224
1225                 ladv = phy_read(phydev, MII_ADVERTISE);
1226                 if (ladv < 0)
1227                         return ladv;
1228
1229                 radv = phy_read(phydev, MII_LPA);
1230                 if (radv < 0)
1231                         return radv;
1232
1233                 netif_dbg(dev, link, dev->net,
1234                           "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1235                           ecmd.base.speed, ecmd.base.duplex, ladv, radv);
1236
1237                 ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1238                                                  radv);
1239
1240                 if (!timer_pending(&dev->stat_monitor)) {
1241                         dev->delta = 1;
1242                         mod_timer(&dev->stat_monitor,
1243                                   jiffies + STAT_UPDATE_TIMER);
1244                 }
1245         }
1246
1247         return ret;
1248 }
1249
1250 /* some work can't be done in tasklets, so we use keventd
1251  *
1252  * NOTE: annoying asymmetry: if the work is already queued, schedule_work()
1253  * fails, but tasklet_schedule() doesn't. Hope the failure is rare.
1254  */
1255 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1256 {
1257         set_bit(work, &dev->flags);
1258         if (!schedule_delayed_work(&dev->wq, 0))
1259                 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1260 }
1261
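/* Interrupt endpoint completion handler: decode the 4-byte status word and,
 * for a PHY interrupt, schedule a link reset and forward the event to the
 * PHY's mapped interrupt.
 */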
1262 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1263 {
1264         u32 intdata;
1265
1266         if (urb->actual_length != 4) {
1267                 netdev_warn(dev->net,
1268                             "unexpected urb length %d", urb->actual_length);
1269                 return;
1270         }
1271
1272         memcpy(&intdata, urb->transfer_buffer, 4);
1273         le32_to_cpus(&intdata);
1274
1275         if (intdata & INT_ENP_PHY_INT) {
1276                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1277                 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1278
1279                 if (dev->domain_data.phyirq > 0)
1280                         generic_handle_irq(dev->domain_data.phyirq);
1281         } else
1282                 netdev_warn(dev->net,
1283                             "unexpected interrupt: 0x%08x\n", intdata);
1284 }
1285
1286 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1287 {
1288         return MAX_EEPROM_SIZE;
1289 }
1290
1291 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1292                                       struct ethtool_eeprom *ee, u8 *data)
1293 {
1294         struct lan78xx_net *dev = netdev_priv(netdev);
1295         int ret;
1296
1297         ret = usb_autopm_get_interface(dev->intf);
1298         if (ret)
1299                 return ret;
1300
1301         ee->magic = LAN78XX_EEPROM_MAGIC;
1302
1303         ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1304
1305         usb_autopm_put_interface(dev->intf);
1306
1307         return ret;
1308 }
1309
1310 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1311                                       struct ethtool_eeprom *ee, u8 *data)
1312 {
1313         struct lan78xx_net *dev = netdev_priv(netdev);
1314         int ret;
1315
1316         ret = usb_autopm_get_interface(dev->intf);
1317         if (ret)
1318                 return ret;
1319
1320         /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1321          * to load data from EEPROM
1322          */
1323         if (ee->magic == LAN78XX_EEPROM_MAGIC)
1324                 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1325         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1326                  (ee->offset == 0) &&
1327                  (ee->len == 512) &&
1328                  (data[0] == OTP_INDICATOR_1))
1329                 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1330
1331         usb_autopm_put_interface(dev->intf);
1332
1333         return ret;
1334 }
1335
1336 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1337                                 u8 *data)
1338 {
1339         if (stringset == ETH_SS_STATS)
1340                 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1341 }
1342
1343 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1344 {
1345         if (sset == ETH_SS_STATS)
1346                 return ARRAY_SIZE(lan78xx_gstrings);
1347         else
1348                 return -EOPNOTSUPP;
1349 }
1350
1351 static void lan78xx_get_stats(struct net_device *netdev,
1352                               struct ethtool_stats *stats, u64 *data)
1353 {
1354         struct lan78xx_net *dev = netdev_priv(netdev);
1355
1356         lan78xx_update_stats(dev);
1357
1358         mutex_lock(&dev->stats.access_lock);
1359         memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1360         mutex_unlock(&dev->stats.access_lock);
1361 }
1362
1363 static void lan78xx_get_wol(struct net_device *netdev,
1364                             struct ethtool_wolinfo *wol)
1365 {
1366         struct lan78xx_net *dev = netdev_priv(netdev);
1367         int ret;
1368         u32 buf;
1369         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1370
1371         if (usb_autopm_get_interface(dev->intf) < 0)
1372                 return;
1373
1374         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1375         if (unlikely(ret < 0)) {
1376                 wol->supported = 0;
1377                 wol->wolopts = 0;
1378         } else {
1379                 if (buf & USB_CFG_RMT_WKP_) {
1380                         wol->supported = WAKE_ALL;
1381                         wol->wolopts = pdata->wol;
1382                 } else {
1383                         wol->supported = 0;
1384                         wol->wolopts = 0;
1385                 }
1386         }
1387
1388         usb_autopm_put_interface(dev->intf);
1389 }
1390
1391 static int lan78xx_set_wol(struct net_device *netdev,
1392                            struct ethtool_wolinfo *wol)
1393 {
1394         struct lan78xx_net *dev = netdev_priv(netdev);
1395         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1396         int ret;
1397
1398         ret = usb_autopm_get_interface(dev->intf);
1399         if (ret < 0)
1400                 return ret;
1401
1402         pdata->wol = 0;
1403         if (wol->wolopts & WAKE_UCAST)
1404                 pdata->wol |= WAKE_UCAST;
1405         if (wol->wolopts & WAKE_MCAST)
1406                 pdata->wol |= WAKE_MCAST;
1407         if (wol->wolopts & WAKE_BCAST)
1408                 pdata->wol |= WAKE_BCAST;
1409         if (wol->wolopts & WAKE_MAGIC)
1410                 pdata->wol |= WAKE_MAGIC;
1411         if (wol->wolopts & WAKE_PHY)
1412                 pdata->wol |= WAKE_PHY;
1413         if (wol->wolopts & WAKE_ARP)
1414                 pdata->wol |= WAKE_ARP;
1415
1416         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1417
1418         phy_ethtool_set_wol(netdev->phydev, wol);
1419
1420         usb_autopm_put_interface(dev->intf);
1421
1422         return ret;
1423 }
1424
1425 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1426 {
1427         struct lan78xx_net *dev = netdev_priv(net);
1428         struct phy_device *phydev = net->phydev;
1429         int ret;
1430         u32 buf;
1431
1432         ret = usb_autopm_get_interface(dev->intf);
1433         if (ret < 0)
1434                 return ret;
1435
1436         ret = phy_ethtool_get_eee(phydev, edata);
1437         if (ret < 0)
1438                 goto exit;
1439
1440         ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1441         if (buf & MAC_CR_EEE_EN_) {
1442                 edata->eee_enabled = true;
1443                 edata->eee_active = !!(edata->advertised &
1444                                        edata->lp_advertised);
1445                 edata->tx_lpi_enabled = true;
1446                 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer use the same uSec unit */
1447                 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1448                 edata->tx_lpi_timer = buf;
1449         } else {
1450                 edata->eee_enabled = false;
1451                 edata->eee_active = false;
1452                 edata->tx_lpi_enabled = false;
1453                 edata->tx_lpi_timer = 0;
1454         }
1455
1456         ret = 0;
1457 exit:
1458         usb_autopm_put_interface(dev->intf);
1459
1460         return ret;
1461 }
1462
1463 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1464 {
1465         struct lan78xx_net *dev = netdev_priv(net);
1466         int ret;
1467         u32 buf;
1468
1469         ret = usb_autopm_get_interface(dev->intf);
1470         if (ret < 0)
1471                 return ret;
1472
1473         if (edata->eee_enabled) {
1474                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1475                 buf |= MAC_CR_EEE_EN_;
1476                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1477
1478                 phy_ethtool_set_eee(net->phydev, edata);
1479
1480                 buf = (u32)edata->tx_lpi_timer;
1481                 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1482         } else {
1483                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1484                 buf &= ~MAC_CR_EEE_EN_;
1485                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1486         }
1487
1488         usb_autopm_put_interface(dev->intf);
1489
1490         return 0;
1491 }
1492
1493 static u32 lan78xx_get_link(struct net_device *net)
1494 {
1495         phy_read_status(net->phydev);
1496
1497         return net->phydev->link;
1498 }
1499
1500 static void lan78xx_get_drvinfo(struct net_device *net,
1501                                 struct ethtool_drvinfo *info)
1502 {
1503         struct lan78xx_net *dev = netdev_priv(net);
1504
1505         strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1506         usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1507 }
1508
1509 static u32 lan78xx_get_msglevel(struct net_device *net)
1510 {
1511         struct lan78xx_net *dev = netdev_priv(net);
1512
1513         return dev->msg_enable;
1514 }
1515
1516 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1517 {
1518         struct lan78xx_net *dev = netdev_priv(net);
1519
1520         dev->msg_enable = level;
1521 }
1522
1523 static int lan78xx_get_link_ksettings(struct net_device *net,
1524                                       struct ethtool_link_ksettings *cmd)
1525 {
1526         struct lan78xx_net *dev = netdev_priv(net);
1527         struct phy_device *phydev = net->phydev;
1528         int ret;
1529
1530         ret = usb_autopm_get_interface(dev->intf);
1531         if (ret < 0)
1532                 return ret;
1533
1534         phy_ethtool_ksettings_get(phydev, cmd);
1535
1536         usb_autopm_put_interface(dev->intf);
1537
1538         return ret;
1539 }
1540
1541 static int lan78xx_set_link_ksettings(struct net_device *net,
1542                                       const struct ethtool_link_ksettings *cmd)
1543 {
1544         struct lan78xx_net *dev = netdev_priv(net);
1545         struct phy_device *phydev = net->phydev;
1546         int ret = 0;
1547         int temp;
1548
1549         ret = usb_autopm_get_interface(dev->intf);
1550         if (ret < 0)
1551                 return ret;
1552
1553         /* change speed & duplex */
1554         ret = phy_ethtool_ksettings_set(phydev, cmd);
1555
1556         if (!cmd->base.autoneg) {
1557                 /* force link down */
1558                 temp = phy_read(phydev, MII_BMCR);
1559                 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1560                 mdelay(1);
1561                 phy_write(phydev, MII_BMCR, temp);
1562         }
1563
1564         usb_autopm_put_interface(dev->intf);
1565
1566         return ret;
1567 }
1568
1569 static void lan78xx_get_pause(struct net_device *net,
1570                               struct ethtool_pauseparam *pause)
1571 {
1572         struct lan78xx_net *dev = netdev_priv(net);
1573         struct phy_device *phydev = net->phydev;
1574         struct ethtool_link_ksettings ecmd;
1575
1576         phy_ethtool_ksettings_get(phydev, &ecmd);
1577
1578         pause->autoneg = dev->fc_autoneg;
1579
1580         if (dev->fc_request_control & FLOW_CTRL_TX)
1581                 pause->tx_pause = 1;
1582
1583         if (dev->fc_request_control & FLOW_CTRL_RX)
1584                 pause->rx_pause = 1;
1585 }
1586
1587 static int lan78xx_set_pause(struct net_device *net,
1588                              struct ethtool_pauseparam *pause)
1589 {
1590         struct lan78xx_net *dev = netdev_priv(net);
1591         struct phy_device *phydev = net->phydev;
1592         struct ethtool_link_ksettings ecmd;
1593         int ret;
1594
1595         phy_ethtool_ksettings_get(phydev, &ecmd);
1596
1597         if (pause->autoneg && !ecmd.base.autoneg) {
1598                 ret = -EINVAL;
1599                 goto exit;
1600         }
1601
1602         dev->fc_request_control = 0;
1603         if (pause->rx_pause)
1604                 dev->fc_request_control |= FLOW_CTRL_RX;
1605
1606         if (pause->tx_pause)
1607                 dev->fc_request_control |= FLOW_CTRL_TX;
1608
1609         if (ecmd.base.autoneg) {
1610                 u32 mii_adv;
1611                 u32 advertising;
1612
1613                 ethtool_convert_link_mode_to_legacy_u32(
1614                         &advertising, ecmd.link_modes.advertising);
1615
1616                 advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1617                 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1618                 advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1619
1620                 ethtool_convert_legacy_u32_to_link_mode(
1621                         ecmd.link_modes.advertising, advertising);
1622
1623                 phy_ethtool_ksettings_set(phydev, &ecmd);
1624         }
1625
1626         dev->fc_autoneg = pause->autoneg;
1627
1628         ret = 0;
1629 exit:
1630         return ret;
1631 }
1632
1633 static int lan78xx_get_regs_len(struct net_device *netdev)
1634 {
1635         if (!netdev->phydev)
1636                 return (sizeof(lan78xx_regs));
1637         else
1638                 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1639 }
1640
1641 static void
1642 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1643                  void *buf)
1644 {
1645         u32 *data = buf;
1646         int i, j;
1647         struct lan78xx_net *dev = netdev_priv(netdev);
1648
1649         /* Read Device/MAC registers */
1650         for (i = 0; i < (sizeof(lan78xx_regs) / sizeof(u32)); i++)
1651                 lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1652
1653         if (!netdev->phydev)
1654                 return;
1655
1656         /* Read PHY registers */
1657         for (j = 0; j < 32; i++, j++)
1658                 data[i] = phy_read(netdev->phydev, j);
1659 }
1660
1661 static const struct ethtool_ops lan78xx_ethtool_ops = {
1662         .get_link       = lan78xx_get_link,
1663         .nway_reset     = phy_ethtool_nway_reset,
1664         .get_drvinfo    = lan78xx_get_drvinfo,
1665         .get_msglevel   = lan78xx_get_msglevel,
1666         .set_msglevel   = lan78xx_set_msglevel,
1667         .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1668         .get_eeprom     = lan78xx_ethtool_get_eeprom,
1669         .set_eeprom     = lan78xx_ethtool_set_eeprom,
1670         .get_ethtool_stats = lan78xx_get_stats,
1671         .get_sset_count = lan78xx_get_sset_count,
1672         .get_strings    = lan78xx_get_strings,
1673         .get_wol        = lan78xx_get_wol,
1674         .set_wol        = lan78xx_set_wol,
1675         .get_eee        = lan78xx_get_eee,
1676         .set_eee        = lan78xx_set_eee,
1677         .get_pauseparam = lan78xx_get_pause,
1678         .set_pauseparam = lan78xx_set_pause,
1679         .get_link_ksettings = lan78xx_get_link_ksettings,
1680         .set_link_ksettings = lan78xx_set_link_ksettings,
1681         .get_regs_len   = lan78xx_get_regs_len,
1682         .get_regs       = lan78xx_get_regs,
1683 };
1684
1685 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1686 {
1687         if (!netif_running(netdev))
1688                 return -EINVAL;
1689
1690         return phy_mii_ioctl(netdev->phydev, rq, cmd);
1691 }
1692
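/* Select and program the MAC address. The address currently held in
 * RX_ADDRL/RX_ADDRH is used if it is valid; otherwise the driver falls back
 * to, in order: an address provided by platform data/Device Tree, an address
 * stored in EEPROM or OTP, and finally a randomly generated one. The chosen
 * address is also written to perfect filter slot 0 (MAF_LO(0)/MAF_HI(0)) and
 * copied into the net_device.
 */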
1693 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1694 {
1695         u32 addr_lo, addr_hi;
1696         int ret;
1697         u8 addr[6];
1698
1699         ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1700         ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1701
1702         addr[0] = addr_lo & 0xFF;
1703         addr[1] = (addr_lo >> 8) & 0xFF;
1704         addr[2] = (addr_lo >> 16) & 0xFF;
1705         addr[3] = (addr_lo >> 24) & 0xFF;
1706         addr[4] = addr_hi & 0xFF;
1707         addr[5] = (addr_hi >> 8) & 0xFF;
1708
1709         if (!is_valid_ether_addr(addr)) {
1710                 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1711                         /* valid address present in Device Tree */
1712                         netif_dbg(dev, ifup, dev->net,
1713                                   "MAC address read from Device Tree");
1714                 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1715                                                  ETH_ALEN, addr) == 0) ||
1716                             (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1717                                               ETH_ALEN, addr) == 0)) &&
1718                            is_valid_ether_addr(addr)) {
1719                         /* eeprom values are valid so use them */
1720                         netif_dbg(dev, ifup, dev->net,
1721                                   "MAC address read from EEPROM");
1722                 } else {
1723                         /* generate random MAC */
1724                         random_ether_addr(addr);
1725                         netif_dbg(dev, ifup, dev->net,
1726                                   "MAC address set to random addr");
1727                 }
1728
1729                 addr_lo = addr[0] | (addr[1] << 8) |
1730                           (addr[2] << 16) | (addr[3] << 24);
1731                 addr_hi = addr[4] | (addr[5] << 8);
1732
1733                 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1734                 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1735         }
1736
1737         ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1738         ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1739
1740         ether_addr_copy(dev->net->dev_addr, addr);
1741 }
1742
1743 /* MDIO read and write wrappers for phylib */
1744 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1745 {
1746         struct lan78xx_net *dev = bus->priv;
1747         u32 val, addr;
1748         int ret;
1749
1750         ret = usb_autopm_get_interface(dev->intf);
1751         if (ret < 0)
1752                 return ret;
1753
1754         mutex_lock(&dev->phy_mutex);
1755
1756         /* confirm MII not busy */
1757         ret = lan78xx_phy_wait_not_busy(dev);
1758         if (ret < 0)
1759                 goto done;
1760
1761         /* set the address, index & direction (read from PHY) */
1762         addr = mii_access(phy_id, idx, MII_READ);
1763         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1764
1765         ret = lan78xx_phy_wait_not_busy(dev);
1766         if (ret < 0)
1767                 goto done;
1768
1769         ret = lan78xx_read_reg(dev, MII_DATA, &val);
1770
1771         ret = (int)(val & 0xFFFF);
1772
1773 done:
1774         mutex_unlock(&dev->phy_mutex);
1775         usb_autopm_put_interface(dev->intf);
1776
1777         return ret;
1778 }
1779
1780 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1781                                  u16 regval)
1782 {
1783         struct lan78xx_net *dev = bus->priv;
1784         u32 val, addr;
1785         int ret;
1786
1787         ret = usb_autopm_get_interface(dev->intf);
1788         if (ret < 0)
1789                 return ret;
1790
1791         mutex_lock(&dev->phy_mutex);
1792
1793         /* confirm MII not busy */
1794         ret = lan78xx_phy_wait_not_busy(dev);
1795         if (ret < 0)
1796                 goto done;
1797
1798         val = (u32)regval;
1799         ret = lan78xx_write_reg(dev, MII_DATA, val);
1800
1801         /* set the address, index & direction (write to PHY) */
1802         addr = mii_access(phy_id, idx, MII_WRITE);
1803         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1804
1805         ret = lan78xx_phy_wait_not_busy(dev);
1806         if (ret < 0)
1807                 goto done;
1808
1809 done:
1810         mutex_unlock(&dev->phy_mutex);
1811         usb_autopm_put_interface(dev->intf);
1812         return 0;
1813 }
1814
1815 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1816 {
1817         struct device_node *node;
1818         int ret;
1819
1820         dev->mdiobus = mdiobus_alloc();
1821         if (!dev->mdiobus) {
1822                 netdev_err(dev->net, "can't allocate MDIO bus\n");
1823                 return -ENOMEM;
1824         }
1825
1826         dev->mdiobus->priv = (void *)dev;
1827         dev->mdiobus->read = lan78xx_mdiobus_read;
1828         dev->mdiobus->write = lan78xx_mdiobus_write;
1829         dev->mdiobus->name = "lan78xx-mdiobus";
1830
1831         snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1832                  dev->udev->bus->busnum, dev->udev->devnum);
1833
1834         switch (dev->chipid) {
1835         case ID_REV_CHIP_ID_7800_:
1836         case ID_REV_CHIP_ID_7850_:
1837                 /* set to internal PHY id */
1838                 dev->mdiobus->phy_mask = ~(1 << 1);
1839                 break;
1840         case ID_REV_CHIP_ID_7801_:
1841                 /* scan through PHYAD[2..0] */
1842                 dev->mdiobus->phy_mask = ~(0xFF);
1843                 break;
1844         }
1845
1846         node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
1847         ret = of_mdiobus_register(dev->mdiobus, node);
1848         if (node)
1849                 of_node_put(node);
1850         if (ret) {
1851                 netdev_err(dev->net, "can't register MDIO bus\n");
1852                 goto exit1;
1853         }
1854
1855         netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1856         return 0;
1857 exit1:
1858         mdiobus_free(dev->mdiobus);
1859         return ret;
1860 }
1861
1862 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1863 {
1864         mdiobus_unregister(dev->mdiobus);
1865         mdiobus_free(dev->mdiobus);
1866 }
1867
1868 static void lan78xx_link_status_change(struct net_device *net)
1869 {
1870         struct phy_device *phydev = net->phydev;
1871         int ret, temp;
1872
1873         /* In forced 100 Mbps full/half-duplex mode, the chip may fail to set
1874          * the mode correctly when the cable is swapped between a long (~50+ m)
1875          * cable and a short one. As a workaround, force the speed to 10 Mbps
1876          * first and then back to 100 Mbps.
1877          */
1878         if (!phydev->autoneg && (phydev->speed == 100)) {
1879                 /* disable phy interrupt */
1880                 temp = phy_read(phydev, LAN88XX_INT_MASK);
1881                 temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1882                 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1883
1884                 temp = phy_read(phydev, MII_BMCR);
1885                 temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1886                 phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1887                 temp |= BMCR_SPEED100;
1888                 phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1889
1890                 /* clear any pending interrupt generated during the workaround */
1891                 temp = phy_read(phydev, LAN88XX_INT_STS);
1892
1893                 /* enable phy interrupt back */
1894                 temp = phy_read(phydev, LAN88XX_INT_MASK);
1895                 temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1896                 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1897         }
1898 }
1899
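/* The status bits delivered on the device's interrupt endpoint are exposed
 * through a small irq_chip/irq_domain so that phylib can request the PHY
 * interrupt (INT_EP_PHY) like any other Linux interrupt. The mask/unmask
 * callbacks only update the cached enable word; the INT_EP_CTL register
 * itself is written from irq_bus_sync_unlock(), since irq_bus_lock() and
 * irq_bus_sync_unlock() are the callbacks that run in a sleepable context
 * where USB register access is allowed.
 */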
1900 static int irq_map(struct irq_domain *d, unsigned int irq,
1901                    irq_hw_number_t hwirq)
1902 {
1903         struct irq_domain_data *data = d->host_data;
1904
1905         irq_set_chip_data(irq, data);
1906         irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1907         irq_set_noprobe(irq);
1908
1909         return 0;
1910 }
1911
1912 static void irq_unmap(struct irq_domain *d, unsigned int irq)
1913 {
1914         irq_set_chip_and_handler(irq, NULL, NULL);
1915         irq_set_chip_data(irq, NULL);
1916 }
1917
1918 static const struct irq_domain_ops chip_domain_ops = {
1919         .map    = irq_map,
1920         .unmap  = irq_unmap,
1921 };
1922
1923 static void lan78xx_irq_mask(struct irq_data *irqd)
1924 {
1925         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1926
1927         data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1928 }
1929
1930 static void lan78xx_irq_unmask(struct irq_data *irqd)
1931 {
1932         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1933
1934         data->irqenable |= BIT(irqd_to_hwirq(irqd));
1935 }
1936
1937 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1938 {
1939         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1940
1941         mutex_lock(&data->irq_lock);
1942 }
1943
1944 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1945 {
1946         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1947         struct lan78xx_net *dev =
1948                         container_of(data, struct lan78xx_net, domain_data);
1949         u32 buf;
1950         int ret;
1951
1952         /* Do the register access here because irq_bus_lock & irq_bus_sync_unlock
1953          * are the only two callbacks executed in a non-atomic context.
1954          */
1955         ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1956         if (buf != data->irqenable)
1957                 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1958
1959         mutex_unlock(&data->irq_lock);
1960 }
1961
1962 static struct irq_chip lan78xx_irqchip = {
1963         .name                   = "lan78xx-irqs",
1964         .irq_mask               = lan78xx_irq_mask,
1965         .irq_unmask             = lan78xx_irq_unmask,
1966         .irq_bus_lock           = lan78xx_irq_bus_lock,
1967         .irq_bus_sync_unlock    = lan78xx_irq_bus_sync_unlock,
1968 };
1969
1970 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1971 {
1972         struct device_node *of_node;
1973         struct irq_domain *irqdomain;
1974         unsigned int irqmap = 0;
1975         u32 buf;
1976         int ret = 0;
1977
1978         of_node = dev->udev->dev.parent->of_node;
1979
1980         mutex_init(&dev->domain_data.irq_lock);
1981
1982         lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1983         dev->domain_data.irqenable = buf;
1984
1985         dev->domain_data.irqchip = &lan78xx_irqchip;
1986         dev->domain_data.irq_handler = handle_simple_irq;
1987
1988         irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1989                                           &chip_domain_ops, &dev->domain_data);
1990         if (irqdomain) {
1991                 /* create mapping for PHY interrupt */
1992                 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1993                 if (!irqmap) {
1994                         irq_domain_remove(irqdomain);
1995
1996                         irqdomain = NULL;
1997                         ret = -EINVAL;
1998                 }
1999         } else {
2000                 ret = -EINVAL;
2001         }
2002
2003         dev->domain_data.irqdomain = irqdomain;
2004         dev->domain_data.phyirq = irqmap;
2005
2006         return ret;
2007 }
2008
2009 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2010 {
2011         if (dev->domain_data.phyirq > 0) {
2012                 irq_dispose_mapping(dev->domain_data.phyirq);
2013
2014                 if (dev->domain_data.irqdomain)
2015                         irq_domain_remove(dev->domain_data.irqdomain);
2016         }
2017         dev->domain_data.phyirq = 0;
2018         dev->domain_data.irqdomain = NULL;
2019 }
2020
2021 static int lan8835_fixup(struct phy_device *phydev)
2022 {
2023         int buf;
2024         int ret;
2025         struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2026
2027         /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2028         buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2029         buf &= ~0x1800;
2030         buf |= 0x0800;
2031         phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2032
2033         /* RGMII MAC TXC Delay Enable */
2034         ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2035                                 MAC_RGMII_ID_TXC_DELAY_EN_);
2036
2037         /* RGMII TX DLL Tune Adjust */
2038         ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2039
2040         dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2041
2042         return 1;
2043 }
2044
2045 static int ksz9031rnx_fixup(struct phy_device *phydev)
2046 {
2047         struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2048
2049         /* Micrel KSZ9031RNX PHY configuration */
2050         /* RGMII Control Signal Pad Skew */
2051         phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2052         /* RGMII RX Data Pad Skew */
2053         phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2054         /* RGMII RX Clock Pad Skew */
2055         phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2056
2057         dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2058
2059         return 1;
2060 }
2061
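/* LAN7801 relies on an external (or fixed-link) PHY. If a PHY is found on
 * the MDIO bus, RGMII delay fixups are registered for the supported external
 * PHYs (KSZ9031RNX and LAN8835); if none is found, the driver falls back to
 * a fixed 1 Gbps full-duplex link and configures the RGMII clocks and TX
 * delay in the MAC directly.
 */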
2062 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2063 {
2064         u32 buf;
2065         int ret;
2066         struct fixed_phy_status fphy_status = {
2067                 .link = 1,
2068                 .speed = SPEED_1000,
2069                 .duplex = DUPLEX_FULL,
2070         };
2071         struct phy_device *phydev;
2072
2073         phydev = phy_find_first(dev->mdiobus);
2074         if (!phydev) {
2075                 netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2076                 phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1,
2077                                             NULL);
2078                 if (IS_ERR(phydev)) {
2079                         netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2080                         return NULL;
2081                 }
2082                 netdev_dbg(dev->net, "Registered FIXED PHY\n");
2083                 dev->interface = PHY_INTERFACE_MODE_RGMII;
2084                 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2085                                         MAC_RGMII_ID_TXC_DELAY_EN_);
2086                 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2087                 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2088                 buf |= HW_CFG_CLK125_EN_;
2089                 buf |= HW_CFG_REFCLK25_EN_;
2090                 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2091         } else {
2092                 if (!phydev->drv) {
2093                         netdev_err(dev->net, "no PHY driver found\n");
2094                         return NULL;
2095                 }
2096                 dev->interface = PHY_INTERFACE_MODE_RGMII;
2097                 /* external PHY fixup for KSZ9031RNX */
2098                 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2099                                                  ksz9031rnx_fixup);
2100                 if (ret < 0) {
2101                         netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2102                         return NULL;
2103                 }
2104                 /* external PHY fixup for LAN8835 */
2105                 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2106                                                  lan8835_fixup);
2107                 if (ret < 0) {
2108                         netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2109                         return NULL;
2110                 }
2111                 /* add more external PHY fixup here if needed */
2112
2113                 phydev->is_internal = false;
2114         }
2115         return phydev;
2116 }
2117
2118 static int lan78xx_phy_init(struct lan78xx_net *dev)
2119 {
2120         int ret;
2121         u32 mii_adv;
2122         struct phy_device *phydev;
2123
2124         switch (dev->chipid) {
2125         case ID_REV_CHIP_ID_7801_:
2126                 phydev = lan7801_phy_init(dev);
2127                 if (!phydev) {
2128                         netdev_err(dev->net, "lan7801: PHY Init Failed");
2129                         return -EIO;
2130                 }
2131                 break;
2132
2133         case ID_REV_CHIP_ID_7800_:
2134         case ID_REV_CHIP_ID_7850_:
2135                 phydev = phy_find_first(dev->mdiobus);
2136                 if (!phydev) {
2137                         netdev_err(dev->net, "no PHY found\n");
2138                         return -EIO;
2139                 }
2140                 phydev->is_internal = true;
2141                 dev->interface = PHY_INTERFACE_MODE_GMII;
2142                 break;
2143
2144         default:
2145                 netdev_err(dev->net, "Unknown CHIP ID found\n");
2146                 return -EIO;
2147         }
2148
2149         /* if phyirq is not set, use polling mode in phylib */
2150         if (dev->domain_data.phyirq > 0)
2151                 phydev->irq = dev->domain_data.phyirq;
2152         else
2153                 phydev->irq = 0;
2154         netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2155
2156         /* set to AUTOMDIX */
2157         phydev->mdix = ETH_TP_MDI_AUTO;
2158
2159         ret = phy_connect_direct(dev->net, phydev,
2160                                  lan78xx_link_status_change,
2161                                  dev->interface);
2162         if (ret) {
2163                 netdev_err(dev->net, "can't attach PHY to %s\n",
2164                            dev->mdiobus->id);
2165                 if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2166                         if (phy_is_pseudo_fixed_link(phydev)) {
2167                                 fixed_phy_unregister(phydev);
2168                         } else {
2169                                 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2170                                                              0xfffffff0);
2171                                 phy_unregister_fixup_for_uid(PHY_LAN8835,
2172                                                              0xfffffff0);
2173                         }
2174                 }
2175                 return -EIO;
2176         }
2177
2178         /* MAC doesn't support 1000T Half */
2179         phydev->supported &= ~SUPPORTED_1000baseT_Half;
2180
2181         /* support both flow controls */
2182         dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2183         phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2184         mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2185         phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2186
2187         if (phydev->mdio.dev.of_node) {
2188                 u32 reg;
2189                 int len;
2190
2191                 len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2192                                                       "microchip,led-modes",
2193                                                       sizeof(u32));
2194                 if (len >= 0) {
2195                         /* Ensure the appropriate LEDs are enabled */
2196                         lan78xx_read_reg(dev, HW_CFG, &reg);
2197                         reg &= ~(HW_CFG_LED0_EN_ |
2198                                  HW_CFG_LED1_EN_ |
2199                                  HW_CFG_LED2_EN_ |
2200                                  HW_CFG_LED3_EN_);
2201                         reg |= (len > 0) * HW_CFG_LED0_EN_ |
2202                                 (len > 1) * HW_CFG_LED1_EN_ |
2203                                 (len > 2) * HW_CFG_LED2_EN_ |
2204                                 (len > 3) * HW_CFG_LED3_EN_;
2205                         lan78xx_write_reg(dev, HW_CFG, reg);
2206                 }
2207         }
2208
2209         genphy_config_aneg(phydev);
2210
2211         dev->fc_autoneg = phydev->autoneg;
2212
2213         return 0;
2214 }
2215
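/* Update the maximum RX frame size in MAC_RX. The receiver is temporarily
 * disabled while the size field is changed and re-enabled afterwards; four
 * bytes are added to the requested size to account for the FCS.
 */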
2216 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2217 {
2218         int ret = 0;
2219         u32 buf;
2220         bool rxenabled;
2221
2222         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2223
2224         rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2225
2226         if (rxenabled) {
2227                 buf &= ~MAC_RX_RXEN_;
2228                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2229         }
2230
2231         /* add 4 to size for FCS */
2232         buf &= ~MAC_RX_MAX_SIZE_MASK_;
2233         buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2234
2235         ret = lan78xx_write_reg(dev, MAC_RX, buf);
2236
2237         if (rxenabled) {
2238                 buf |= MAC_RX_RXEN_;
2239                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2240         }
2241
2242         return 0;
2243 }
2244
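/* Unlink every URB still queued on @q. Returns the number of URBs for which
 * an asynchronous unlink was started. The queue lock is dropped around
 * usb_unlink_urb() while a reference is held on the URB (see the comment in
 * the loop body), so completions may run concurrently.
 */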
2245 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2246 {
2247         struct sk_buff *skb;
2248         unsigned long flags;
2249         int count = 0;
2250
2251         spin_lock_irqsave(&q->lock, flags);
2252         while (!skb_queue_empty(q)) {
2253                 struct skb_data *entry;
2254                 struct urb *urb;
2255                 int ret;
2256
2257                 skb_queue_walk(q, skb) {
2258                         entry = (struct skb_data *)skb->cb;
2259                         if (entry->state != unlink_start)
2260                                 goto found;
2261                 }
2262                 break;
2263 found:
2264                 entry->state = unlink_start;
2265                 urb = entry->urb;
2266
2267                 /* Take a reference on the URB so it cannot be
2268                  * freed during usb_unlink_urb(), which could otherwise
2269                  * trigger a use-after-free inside usb_unlink_urb() since
2270                  * usb_unlink_urb() always races with the .complete
2271                  * handler (including defer_bh).
2272                  */
2273                 usb_get_urb(urb);
2274                 spin_unlock_irqrestore(&q->lock, flags);
2275                 /* during some PM-driven resume scenarios,
2276                  * these (async) unlinks complete immediately
2277                  */
2278                 ret = usb_unlink_urb(urb);
2279                 if (ret != -EINPROGRESS && ret != 0)
2280                         netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2281                 else
2282                         count++;
2283                 usb_put_urb(urb);
2284                 spin_lock_irqsave(&q->lock, flags);
2285         }
2286         spin_unlock_irqrestore(&q->lock, flags);
2287         return count;
2288 }
2289
2290 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2291 {
2292         struct lan78xx_net *dev = netdev_priv(netdev);
2293         int ll_mtu = new_mtu + netdev->hard_header_len;
2294         int old_hard_mtu = dev->hard_mtu;
2295         int old_rx_urb_size = dev->rx_urb_size;
2296         int ret;
2297
2298         /* no second zero-length packet read wanted after mtu-sized packets */
2299         if ((ll_mtu % dev->maxpacket) == 0)
2300                 return -EDOM;
2301
2302         ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2303
2304         netdev->mtu = new_mtu;
2305
2306         dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2307         if (dev->rx_urb_size == old_hard_mtu) {
2308                 dev->rx_urb_size = dev->hard_mtu;
2309                 if (dev->rx_urb_size > old_rx_urb_size) {
2310                         if (netif_running(dev->net)) {
2311                                 unlink_urbs(dev, &dev->rxq);
2312                                 tasklet_schedule(&dev->bh);
2313                         }
2314                 }
2315         }
2316
2317         return 0;
2318 }
2319
2320 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2321 {
2322         struct lan78xx_net *dev = netdev_priv(netdev);
2323         struct sockaddr *addr = p;
2324         u32 addr_lo, addr_hi;
2325         int ret;
2326
2327         if (netif_running(netdev))
2328                 return -EBUSY;
2329
2330         if (!is_valid_ether_addr(addr->sa_data))
2331                 return -EADDRNOTAVAIL;
2332
2333         ether_addr_copy(netdev->dev_addr, addr->sa_data);
2334
2335         addr_lo = netdev->dev_addr[0] |
2336                   netdev->dev_addr[1] << 8 |
2337                   netdev->dev_addr[2] << 16 |
2338                   netdev->dev_addr[3] << 24;
2339         addr_hi = netdev->dev_addr[4] |
2340                   netdev->dev_addr[5] << 8;
2341
2342         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2343         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2344
2345         return 0;
2346 }
2347
2348 /* Enable or disable Rx checksum offload, VLAN stripping and VLAN filtering */
2349 static int lan78xx_set_features(struct net_device *netdev,
2350                                 netdev_features_t features)
2351 {
2352         struct lan78xx_net *dev = netdev_priv(netdev);
2353         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2354         unsigned long flags;
2355         int ret;
2356
2357         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2358
2359         if (features & NETIF_F_RXCSUM) {
2360                 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2361                 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2362         } else {
2363                 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2364                 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2365         }
2366
2367         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2368                 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2369         else
2370                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2371
2372         if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2373                 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2374         else
2375                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2376
2377         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2378
2379         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2380
2381         return 0;
2382 }
2383
2384 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2385 {
2386         struct lan78xx_priv *pdata =
2387                         container_of(param, struct lan78xx_priv, set_vlan);
2388         struct lan78xx_net *dev = pdata->dev;
2389
2390         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2391                                DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2392 }
2393
2394 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2395                                    __be16 proto, u16 vid)
2396 {
2397         struct lan78xx_net *dev = netdev_priv(netdev);
2398         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2399         u16 vid_bit_index;
2400         u16 vid_dword_index;
2401
2402         vid_dword_index = (vid >> 5) & 0x7F;
2403         vid_bit_index = vid & 0x1F;
2404
2405         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2406
2407         /* defer register writes to a sleepable context */
2408         schedule_work(&pdata->set_vlan);
2409
2410         return 0;
2411 }
2412
2413 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2414                                     __be16 proto, u16 vid)
2415 {
2416         struct lan78xx_net *dev = netdev_priv(netdev);
2417         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2418         u16 vid_bit_index;
2419         u16 vid_dword_index;
2420
2421         vid_dword_index = (vid >> 5) & 0x7F;
2422         vid_bit_index = vid & 0x1F;
2423
2424         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2425
2426         /* defer register writes to a sleepable context */
2427         schedule_work(&pdata->set_vlan);
2428
2429         return 0;
2430 }
2431
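/* Program the LTM (USB Latency Tolerance Messaging) registers. If LTM is
 * enabled in USB_CFG1 and the two-byte descriptor at EEPROM/OTP offset 0x3F
 * indicates a 24-byte table, the six LTM values are loaded from that table;
 * otherwise the registers are cleared to zero.
 */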
2432 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2433 {
2434         int ret;
2435         u32 buf;
2436         u32 regs[6] = { 0 };
2437
2438         ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2439         if (buf & USB_CFG1_LTM_ENABLE_) {
2440                 u8 temp[2];
2441                 /* Get values from EEPROM first */
2442                 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2443                         if (temp[0] == 24) {
2444                                 ret = lan78xx_read_raw_eeprom(dev,
2445                                                               temp[1] * 2,
2446                                                               24,
2447                                                               (u8 *)regs);
2448                                 if (ret < 0)
2449                                         return;
2450                         }
2451                 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2452                         if (temp[0] == 24) {
2453                                 ret = lan78xx_read_raw_otp(dev,
2454                                                            temp[1] * 2,
2455                                                            24,
2456                                                            (u8 *)regs);
2457                                 if (ret < 0)
2458                                         return;
2459                         }
2460                 }
2461         }
2462
2463         lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2464         lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2465         lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2466         lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2467         lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2468         lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2469 }
2470
2471 static int lan78xx_reset(struct lan78xx_net *dev)
2472 {
2473         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2474         u32 buf;
2475         int ret = 0;
2476         unsigned long timeout;
2477         u8 sig;
2478
2479         ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2480         buf |= HW_CFG_LRST_;
2481         ret = lan78xx_write_reg(dev, HW_CFG, buf);
2482
2483         timeout = jiffies + HZ;
2484         do {
2485                 mdelay(1);
2486                 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2487                 if (time_after(jiffies, timeout)) {
2488                         netdev_warn(dev->net,
2489                                     "timeout on completion of LiteReset");
2490                         return -EIO;
2491                 }
2492         } while (buf & HW_CFG_LRST_);
2493
2494         lan78xx_init_mac_address(dev);
2495
2496         /* save DEVID for later usage */
2497         ret = lan78xx_read_reg(dev, ID_REV, &buf);
2498         dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2499         dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2500
2501         /* Respond to the IN token with a NAK */
2502         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2503         buf |= USB_CFG_BIR_;
2504         ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2505
2506         /* Init LTM */
2507         lan78xx_init_ltm(dev);
2508
2509         if (dev->udev->speed == USB_SPEED_SUPER) {
2510                 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2511                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2512                 dev->rx_qlen = 4;
2513                 dev->tx_qlen = 4;
2514         } else if (dev->udev->speed == USB_SPEED_HIGH) {
2515                 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2516                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2517                 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2518                 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2519         } else {
2520                 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2521                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2522                 dev->rx_qlen = 4;
2523                 dev->tx_qlen = 4;
2524         }
2525
2526         ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2527         ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2528
2529         ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2530         buf |= HW_CFG_MEF_;
2531         ret = lan78xx_write_reg(dev, HW_CFG, buf);
2532
2533         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2534         buf |= USB_CFG_BCE_;
2535         ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2536
2537         /* set FIFO sizes */
2538         buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2539         ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2540
2541         buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2542         ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2543
2544         ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2545         ret = lan78xx_write_reg(dev, FLOW, 0);
2546         ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2547
2548         /* Don't need rfe_ctl_lock during initialisation */
2549         ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2550         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2551         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2552
2553         /* Enable or disable checksum offload engines */
2554         lan78xx_set_features(dev->net, dev->net->features);
2555
2556         lan78xx_set_multicast(dev->net);
2557
2558         /* reset PHY */
2559         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2560         buf |= PMT_CTL_PHY_RST_;
2561         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2562
2563         timeout = jiffies + HZ;
2564         do {
2565                 mdelay(1);
2566                 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2567                 if (time_after(jiffies, timeout)) {
2568                         netdev_warn(dev->net, "timeout waiting for PHY Reset");
2569                         return -EIO;
2570                 }
2571         } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2572
2573         ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2574         /* LAN7801 only has RGMII mode */
2575         if (dev->chipid == ID_REV_CHIP_ID_7801_)
2576                 buf &= ~MAC_CR_GMII_EN_;
2577
2578         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2579                 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2580                 if (!ret && sig != EEPROM_INDICATOR) {
2581                         /* No external EEPROM present; let the MAC auto-detect speed and duplex */
2582                         netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2583                         buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2584                 }
2585         }
2586         ret = lan78xx_write_reg(dev, MAC_CR, buf);
2587
2588         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2589         buf |= MAC_TX_TXEN_;
2590         ret = lan78xx_write_reg(dev, MAC_TX, buf);
2591
2592         ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2593         buf |= FCT_TX_CTL_EN_;
2594         ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2595
2596         ret = lan78xx_set_rx_max_frame_length(dev,
2597                                               dev->net->mtu + VLAN_ETH_HLEN);
2598
2599         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2600         buf |= MAC_RX_RXEN_;
2601         ret = lan78xx_write_reg(dev, MAC_RX, buf);
2602
2603         ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2604         buf |= FCT_RX_CTL_EN_;
2605         ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2606
2607         return 0;
2608 }
2609
2610 static void lan78xx_init_stats(struct lan78xx_net *dev)
2611 {
2612         u32 *p;
2613         int i;
2614
2615         /* initialize rollover limits for the stats update:
2616          * some counters are 20 bits wide and some are 32 bits
2617          */
2618         p = (u32 *)&dev->stats.rollover_max;
2619         for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2620                 p[i] = 0xFFFFF;
2621
2622         dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2623         dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2624         dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2625         dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2626         dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2627         dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2628         dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2629         dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2630         dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2631         dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2632
2633         set_bit(EVENT_STAT_UPDATE, &dev->flags);
2634 }
2635
2636 static int lan78xx_open(struct net_device *net)
2637 {
2638         struct lan78xx_net *dev = netdev_priv(net);
2639         int ret;
2640
2641         ret = usb_autopm_get_interface(dev->intf);
2642         if (ret < 0)
2643                 goto out;
2644
2645         phy_start(net->phydev);
2646
2647         netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2648
2649         /* for Link Check */
2650         if (dev->urb_intr) {
2651                 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2652                 if (ret < 0) {
2653                         netif_err(dev, ifup, dev->net,
2654                                   "intr submit %d\n", ret);
2655                         goto done;
2656                 }
2657         }
2658
2659         lan78xx_init_stats(dev);
2660
2661         set_bit(EVENT_DEV_OPEN, &dev->flags);
2662
2663         netif_start_queue(net);
2664
2665         dev->link_on = false;
2666
2667         lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2668 done:
2669         usb_autopm_put_interface(dev->intf);
2670
2671 out:
2672         return ret;
2673 }
2674
2675 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2676 {
2677         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2678         DECLARE_WAITQUEUE(wait, current);
2679         int temp;
2680
2681         /* ensure there are no more active urbs */
2682         add_wait_queue(&unlink_wakeup, &wait);
2683         set_current_state(TASK_UNINTERRUPTIBLE);
2684         dev->wait = &unlink_wakeup;
2685         temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2686
2687         /* maybe wait for deletions to finish. */
2688         while (!skb_queue_empty(&dev->rxq) &&
2689                !skb_queue_empty(&dev->txq) &&
2690                !skb_queue_empty(&dev->done)) {
2691                 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2692                 set_current_state(TASK_UNINTERRUPTIBLE);
2693                 netif_dbg(dev, ifdown, dev->net,
2694                           "waited for %d urb completions\n", temp);
2695         }
2696         set_current_state(TASK_RUNNING);
2697         dev->wait = NULL;
2698         remove_wait_queue(&unlink_wakeup, &wait);
2699 }
2700
2701 static int lan78xx_stop(struct net_device *net)
2702 {
2703         struct lan78xx_net              *dev = netdev_priv(net);
2704
2705         if (timer_pending(&dev->stat_monitor))
2706                 del_timer_sync(&dev->stat_monitor);
2707
2708         if (net->phydev)
2709                 phy_stop(net->phydev);
2710
2711         clear_bit(EVENT_DEV_OPEN, &dev->flags);
2712         netif_stop_queue(net);
2713
2714         netif_info(dev, ifdown, dev->net,
2715                    "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2716                    net->stats.rx_packets, net->stats.tx_packets,
2717                    net->stats.rx_errors, net->stats.tx_errors);
2718
2719         lan78xx_terminate_urbs(dev);
2720
2721         usb_kill_urb(dev->urb_intr);
2722
2723         skb_queue_purge(&dev->rxq_pause);
2724
2725         /* deferred work (task, timer, softirq) must also stop.
2726          * can't flush_scheduled_work() until we drop rtnl (later),
2727          * else workers could deadlock; so make workers a NOP.
2728          */
2729         dev->flags = 0;
2730         cancel_delayed_work_sync(&dev->wq);
2731         tasklet_kill(&dev->bh);
2732
2733         usb_autopm_put_interface(dev->intf);
2734
2735         return 0;
2736 }
2737
2738 static int lan78xx_linearize(struct sk_buff *skb)
2739 {
2740         return skb_linearize(skb);
2741 }
2742
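/* Prepend the two little-endian 32-bit TX command words to the frame.
 * TX_CMD_A carries the frame length together with the FCS, checksum-offload
 * (IPE/TPE) and LSO flags; TX_CMD_B carries the MSS for GSO frames and the
 * VLAN tag to insert. Returns NULL if the headroom cannot be expanded or the
 * skb cannot be linearized (in the former case the skb has already been
 * freed).
 */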
2743 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2744                                        struct sk_buff *skb, gfp_t flags)
2745 {
2746         u32 tx_cmd_a, tx_cmd_b;
2747
2748         if (skb_cow_head(skb, TX_OVERHEAD)) {
2749                 dev_kfree_skb_any(skb);
2750                 return NULL;
2751         }
2752
2753         if (lan78xx_linearize(skb) < 0)
2754                 return NULL;
2755
2756         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2757
2758         if (skb->ip_summed == CHECKSUM_PARTIAL)
2759                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2760
2761         tx_cmd_b = 0;
2762         if (skb_is_gso(skb)) {
2763                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2764
2765                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2766
2767                 tx_cmd_a |= TX_CMD_A_LSO_;
2768         }
2769
2770         if (skb_vlan_tag_present(skb)) {
2771                 tx_cmd_a |= TX_CMD_A_IVTG_;
2772                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2773         }
2774
2775         skb_push(skb, 4);
2776         cpu_to_le32s(&tx_cmd_b);
2777         memcpy(skb->data, &tx_cmd_b, 4);
2778
2779         skb_push(skb, 4);
2780         cpu_to_le32s(&tx_cmd_a);
2781         memcpy(skb->data, &tx_cmd_a, 4);
2782
2783         return skb;
2784 }
2785
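/* Move @skb from its active queue (rxq or txq) onto dev->done, recording the
 * new skb state, and kick the bottom-half tasklet when the done list goes
 * from empty to non-empty. Returns the state the entry had before the move.
 */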
2786 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2787                                struct sk_buff_head *list, enum skb_state state)
2788 {
2789         unsigned long flags;
2790         enum skb_state old_state;
2791         struct skb_data *entry = (struct skb_data *)skb->cb;
2792
2793         spin_lock_irqsave(&list->lock, flags);
2794         old_state = entry->state;
2795         entry->state = state;
2796
2797         __skb_unlink(skb, list);
2798         spin_unlock(&list->lock);
2799         spin_lock(&dev->done.lock);
2800
2801         __skb_queue_tail(&dev->done, skb);
2802         if (skb_queue_len(&dev->done) == 1)
2803                 tasklet_schedule(&dev->bh);
2804         spin_unlock_irqrestore(&dev->done.lock, flags);
2805
2806         return old_state;
2807 }
2808
2809 static void tx_complete(struct urb *urb)
2810 {
2811         struct sk_buff *skb = (struct sk_buff *)urb->context;
2812         struct skb_data *entry = (struct skb_data *)skb->cb;
2813         struct lan78xx_net *dev = entry->dev;
2814
2815         if (urb->status == 0) {
2816                 dev->net->stats.tx_packets += entry->num_of_packet;
2817                 dev->net->stats.tx_bytes += entry->length;
2818         } else {
2819                 dev->net->stats.tx_errors++;
2820
2821                 switch (urb->status) {
2822                 case -EPIPE:
2823                         lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2824                         break;
2825
2826                 /* software-driven interface shutdown */
2827                 case -ECONNRESET:
2828                 case -ESHUTDOWN:
2829                         break;
2830
2831                 case -EPROTO:
2832                 case -ETIME:
2833                 case -EILSEQ:
2834                         netif_stop_queue(dev->net);
2835                         break;
2836                 default:
2837                         netif_dbg(dev, tx_err, dev->net,
2838                                   "tx err %d\n", entry->urb->status);
2839                         break;
2840                 }
2841         }
2842
2843         usb_autopm_put_interface_async(dev->intf);
2844
2845         defer_bh(dev, skb, &dev->txq, tx_done);
2846 }
2847
2848 static void lan78xx_queue_skb(struct sk_buff_head *list,
2849                               struct sk_buff *newsk, enum skb_state state)
2850 {
2851         struct skb_data *entry = (struct skb_data *)newsk->cb;
2852
2853         __skb_queue_tail(list, newsk);
2854         entry->state = state;
2855 }
2856
2857 static netdev_tx_t
2858 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2859 {
2860         struct lan78xx_net *dev = netdev_priv(net);
2861         struct sk_buff *skb2 = NULL;
2862
2863         if (skb) {
2864                 skb_tx_timestamp(skb);
2865                 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2866         }
2867
2868         if (skb2) {
2869                 skb_queue_tail(&dev->txq_pend, skb2);
2870
2871                 /* throttle the TX path at less than SuperSpeed USB */
2872                 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2873                     (skb_queue_len(&dev->txq_pend) > 10))
2874                         netif_stop_queue(net);
2875         } else {
2876                 netif_dbg(dev, tx_err, dev->net,
2877                           "lan78xx_tx_prep return NULL\n");
2878                 dev->net->stats.tx_errors++;
2879                 dev->net->stats.tx_dropped++;
2880         }
2881
2882         tasklet_schedule(&dev->bh);
2883
2884         return NETDEV_TX_OK;
2885 }
2886
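/* Walk the interface's altsettings looking for a bulk-IN, a bulk-OUT and an
 * interrupt-IN endpoint. The bulk endpoints become the RX/TX pipes and the
 * interrupt endpoint is kept for link-status notifications.
 */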
2887 static int
2888 lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2889 {
2890         int tmp;
2891         struct usb_host_interface *alt = NULL;
2892         struct usb_host_endpoint *in = NULL, *out = NULL;
2893         struct usb_host_endpoint *status = NULL;
2894
2895         for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2896                 unsigned ep;
2897
2898                 in = NULL;
2899                 out = NULL;
2900                 status = NULL;
2901                 alt = intf->altsetting + tmp;
2902
2903                 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2904                         struct usb_host_endpoint *e;
2905                         int intr = 0;
2906
2907                         e = alt->endpoint + ep;
2908                         switch (e->desc.bmAttributes) {
2909                         case USB_ENDPOINT_XFER_INT:
2910                                 if (!usb_endpoint_dir_in(&e->desc))
2911                                         continue;
2912                                 intr = 1;
2913                                 /* FALLTHROUGH */
2914                         case USB_ENDPOINT_XFER_BULK:
2915                                 break;
2916                         default:
2917                                 continue;
2918                         }
2919                         if (usb_endpoint_dir_in(&e->desc)) {
2920                                 if (!intr && !in)
2921                                         in = e;
2922                                 else if (intr && !status)
2923                                         status = e;
2924                         } else {
2925                                 if (!out)
2926                                         out = e;
2927                         }
2928                 }
2929                 if (in && out)
2930                         break;
2931         }
2932         if (!alt || !in || !out)
2933                 return -EINVAL;
2934
2935         dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2936                                        in->desc.bEndpointAddress &
2937                                        USB_ENDPOINT_NUMBER_MASK);
2938         dev->pipe_out = usb_sndbulkpipe(dev->udev,
2939                                         out->desc.bEndpointAddress &
2940                                         USB_ENDPOINT_NUMBER_MASK);
2941         dev->ep_intr = status;
2942
2943         return 0;
2944 }
2945
2946 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2947 {
2948         struct lan78xx_priv *pdata = NULL;
2949         int ret;
2950         int i;
2951
2952         ret = lan78xx_get_endpoints(dev, intf);
2953
2954         dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2955
2956         pdata = (struct lan78xx_priv *)(dev->data[0]);
2957         if (!pdata) {
2958                 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2959                 return -ENOMEM;
2960         }
2961
2962         pdata->dev = dev;
2963
2964         spin_lock_init(&pdata->rfe_ctl_lock);
2965         mutex_init(&pdata->dataport_mutex);
2966
2967         INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2968
2969         for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2970                 pdata->vlan_table[i] = 0;
2971
2972         INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2973
2974         dev->net->features = 0;
2975
2976         if (DEFAULT_TX_CSUM_ENABLE)
2977                 dev->net->features |= NETIF_F_HW_CSUM;
2978
2979         if (DEFAULT_RX_CSUM_ENABLE)
2980                 dev->net->features |= NETIF_F_RXCSUM;
2981
2982         if (DEFAULT_TSO_CSUM_ENABLE)
2983                 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2984
2985         if (DEFAULT_VLAN_RX_OFFLOAD)
2986                 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
2987
2988         if (DEFAULT_VLAN_FILTER_ENABLE)
2989                 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2990
2991         dev->net->hw_features = dev->net->features;
2992
2993         ret = lan78xx_setup_irq_domain(dev);
2994         if (ret < 0) {
2995                 netdev_warn(dev->net,
2996                             "lan78xx_setup_irq_domain() failed : %d", ret);
2997                 goto out1;
2998         }
2999
3000         dev->net->hard_header_len += TX_OVERHEAD;
3001         dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
3002
3003         /* Init all registers */
3004         ret = lan78xx_reset(dev);
3005         if (ret) {
3006                 netdev_warn(dev->net, "Registers INIT FAILED....");
3007                 goto out2;
3008         }
3009
3010         ret = lan78xx_mdio_init(dev);
3011         if (ret) {
3012                 netdev_warn(dev->net, "MDIO INIT FAILED.....");
3013                 goto out2;
3014         }
3015
3016         dev->net->flags |= IFF_MULTICAST;
3017
3018         pdata->wol = WAKE_MAGIC;
3019
3020         return ret;
3021
3022 out2:
3023         lan78xx_remove_irq_domain(dev);
3024
3025 out1:
3026         netdev_warn(dev->net, "Bind routine FAILED");
3027         cancel_work_sync(&pdata->set_multicast);
3028         cancel_work_sync(&pdata->set_vlan);
3029         kfree(pdata);
3030         return ret;
3031 }
3032
3033 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3034 {
3035         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3036
3037         lan78xx_remove_irq_domain(dev);
3038
3039         lan78xx_remove_mdio(dev);
3040
3041         if (pdata) {
3042                 cancel_work_sync(&pdata->set_multicast);
3043                 cancel_work_sync(&pdata->set_vlan);
3044                 netif_dbg(dev, ifdown, dev->net, "free pdata");
3045                 kfree(pdata);
3046                 pdata = NULL;
3047                 dev->data[0] = 0;
3048         }
3049 }
3050
3051 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3052                                     struct sk_buff *skb,
3053                                     u32 rx_cmd_a, u32 rx_cmd_b)
3054 {
3055         /* HW Checksum offload appears to be flawed if used when not stripping
3056          * VLAN headers. Drop back to S/W checksums under these conditions.
3057          */
3058         if (!(dev->net->features & NETIF_F_RXCSUM) ||
3059             unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3060             ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3061              !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3062                 skb->ip_summed = CHECKSUM_NONE;
3063         } else {
3064                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3065                 skb->ip_summed = CHECKSUM_COMPLETE;
3066         }
3067 }
3068
3069 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3070                                     struct sk_buff *skb,
3071                                     u32 rx_cmd_a, u32 rx_cmd_b)
3072 {
3073         if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3074             (rx_cmd_a & RX_CMD_A_FVTG_))
3075                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3076                                        (rx_cmd_b & 0xffff));
3077 }
3078
3079 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3080 {
3081         int             status;
3082
3083         if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
3084                 skb_queue_tail(&dev->rxq_pause, skb);
3085                 return;
3086         }
3087
3088         dev->net->stats.rx_packets++;
3089         dev->net->stats.rx_bytes += skb->len;
3090
3091         skb->protocol = eth_type_trans(skb, dev->net);
3092
3093         netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3094                   skb->len + sizeof(struct ethhdr), skb->protocol);
3095         memset(skb->cb, 0, sizeof(struct skb_data));
3096
3097         if (skb_defer_rx_timestamp(skb))
3098                 return;
3099
3100         status = netif_rx(skb);
3101         if (status != NET_RX_SUCCESS)
3102                 netif_dbg(dev, rx_err, dev->net,
3103                           "netif_rx status %d\n", status);
3104 }
3105
3106 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3107 {
3108         if (skb->len < dev->net->hard_header_len)
3109                 return 0;
3110
3111         while (skb->len > 0) {
3112                 u32 rx_cmd_a, rx_cmd_b, align_count, size;
3113                 u16 rx_cmd_c;
3114                 struct sk_buff *skb2;
3115                 unsigned char *packet;
3116
3117                 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
3118                 le32_to_cpus(&rx_cmd_a);
3119                 skb_pull(skb, sizeof(rx_cmd_a));
3120
3121                 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
3122                 le32_to_cpus(&rx_cmd_b);
3123                 skb_pull(skb, sizeof(rx_cmd_b));
3124
3125                 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
3126                 le16_to_cpus(&rx_cmd_c);
3127                 skb_pull(skb, sizeof(rx_cmd_c));
3128
3129                 packet = skb->data;
3130
3131                 /* get the packet length */
3132                 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3133                 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3134
3135                 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3136                         netif_dbg(dev, rx_err, dev->net,
3137                                   "Error rx_cmd_a=0x%08x", rx_cmd_a);
3138                 } else {
3139                         /* last frame in this batch */
3140                         if (skb->len == size) {
3141                                 lan78xx_rx_csum_offload(dev, skb,
3142                                                         rx_cmd_a, rx_cmd_b);
3143                                 lan78xx_rx_vlan_offload(dev, skb,
3144                                                         rx_cmd_a, rx_cmd_b);
3145
3146                                 skb_trim(skb, skb->len - 4); /* remove fcs */
3147                                 skb->truesize = size + sizeof(struct sk_buff);
3148
3149                                 return 1;
3150                         }
3151
3152                         skb2 = skb_clone(skb, GFP_ATOMIC);
3153                         if (unlikely(!skb2)) {
3154                                 netdev_warn(dev->net, "Error allocating skb");
3155                                 return 0;
3156                         }
3157
3158                         skb2->len = size;
3159                         skb2->data = packet;
3160                         skb_set_tail_pointer(skb2, size);
3161
3162                         lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3163                         lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3164
3165                         skb_trim(skb2, skb2->len - 4); /* remove fcs */
3166                         skb2->truesize = size + sizeof(struct sk_buff);
3167
3168                         lan78xx_skb_return(dev, skb2);
3169                 }
3170
3171                 skb_pull(skb, size);
3172
3173                 /* padding bytes before the next frame starts */
3174                 if (skb->len)
3175                         skb_pull(skb, align_count);
3176         }
3177
3178         return 1;
3179 }
3180
3181 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3182 {
3183         if (!lan78xx_rx(dev, skb)) {
3184                 dev->net->stats.rx_errors++;
3185                 goto done;
3186         }
3187
3188         if (skb->len) {
3189                 lan78xx_skb_return(dev, skb);
3190                 return;
3191         }
3192
3193         netif_dbg(dev, rx_err, dev->net, "drop\n");
3194         dev->net->stats.rx_errors++;
3195 done:
3196         skb_queue_tail(&dev->done, skb);
3197 }
3198
3199 static void rx_complete(struct urb *urb);
3200
3201 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3202 {
3203         struct sk_buff *skb;
3204         struct skb_data *entry;
3205         unsigned long lockflags;
3206         size_t size = dev->rx_urb_size;
3207         int ret = 0;
3208
3209         skb = netdev_alloc_skb_ip_align(dev->net, size);
3210         if (!skb) {
3211                 usb_free_urb(urb);
3212                 return -ENOMEM;
3213         }
3214
3215         entry = (struct skb_data *)skb->cb;
3216         entry->urb = urb;
3217         entry->dev = dev;
3218         entry->length = 0;
3219
3220         usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3221                           skb->data, size, rx_complete, skb);
3222
3223         spin_lock_irqsave(&dev->rxq.lock, lockflags);
3224
3225         if (netif_device_present(dev->net) &&
3226             netif_running(dev->net) &&
3227             !test_bit(EVENT_RX_HALT, &dev->flags) &&
3228             !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3229                 ret = usb_submit_urb(urb, GFP_ATOMIC);
3230                 switch (ret) {
3231                 case 0:
3232                         lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3233                         break;
3234                 case -EPIPE:
3235                         lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3236                         break;
3237                 case -ENODEV:
3238                         netif_dbg(dev, ifdown, dev->net, "device gone\n");
3239                         netif_device_detach(dev->net);
3240                         break;
3241                 case -EHOSTUNREACH:
3242                         ret = -ENOLINK;
3243                         break;
3244                 default:
3245                         netif_dbg(dev, rx_err, dev->net,
3246                                   "rx submit, %d\n", ret);
3247                         tasklet_schedule(&dev->bh);
3248                 }
3249         } else {
3250                 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3251                 ret = -ENOLINK;
3252         }
3253         spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3254         if (ret) {
3255                 dev_kfree_skb_any(skb);
3256                 usb_free_urb(urb);
3257         }
3258         return ret;
3259 }
3260
3261 static void rx_complete(struct urb *urb)
3262 {
3263         struct sk_buff  *skb = (struct sk_buff *)urb->context;
3264         struct skb_data *entry = (struct skb_data *)skb->cb;
3265         struct lan78xx_net *dev = entry->dev;
3266         int urb_status = urb->status;
3267         enum skb_state state;
3268
3269         skb_put(skb, urb->actual_length);
3270         state = rx_done;
3271         entry->urb = NULL;
3272
3273         switch (urb_status) {
3274         case 0:
3275                 if (skb->len < dev->net->hard_header_len) {
3276                         state = rx_cleanup;
3277                         dev->net->stats.rx_errors++;
3278                         dev->net->stats.rx_length_errors++;
3279                         netif_dbg(dev, rx_err, dev->net,
3280                                   "rx length %d\n", skb->len);
3281                 }
3282                 usb_mark_last_busy(dev->udev);
3283                 break;
3284         case -EPIPE:
3285                 dev->net->stats.rx_errors++;
3286                 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3287                 /* FALLTHROUGH */
3288         case -ECONNRESET:                               /* async unlink */
3289         case -ESHUTDOWN:                                /* hardware gone */
3290                 netif_dbg(dev, ifdown, dev->net,
3291                           "rx shutdown, code %d\n", urb_status);
3292                 state = rx_cleanup;
3293                 entry->urb = urb;
3294                 urb = NULL;
3295                 break;
3296         case -EPROTO:
3297         case -ETIME:
3298         case -EILSEQ:
3299                 dev->net->stats.rx_errors++;
3300                 state = rx_cleanup;
3301                 entry->urb = urb;
3302                 urb = NULL;
3303                 break;
3304
3305         /* data overrun ... flush fifo? */
3306         case -EOVERFLOW:
3307                 dev->net->stats.rx_over_errors++;
3308                 /* FALLTHROUGH */
3309
3310         default:
3311                 state = rx_cleanup;
3312                 dev->net->stats.rx_errors++;
3313                 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3314                 break;
3315         }
3316
3317         state = defer_bh(dev, skb, &dev->rxq, state);
3318
3319         if (urb) {
3320                 if (netif_running(dev->net) &&
3321                     !test_bit(EVENT_RX_HALT, &dev->flags) &&
3322                     state != unlink_start) {
3323                         rx_submit(dev, urb, GFP_ATOMIC);
3324                         return;
3325                 }
3326                 usb_free_urb(urb);
3327         }
3328         netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
3329 }
3330
3331 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3332 {
3333         int length;
3334         struct urb *urb = NULL;
3335         struct skb_data *entry;
3336         unsigned long flags;
3337         struct sk_buff_head *tqp = &dev->txq_pend;
3338         struct sk_buff *skb, *skb2;
3339         int ret;
3340         int count, pos;
3341         int skb_totallen, pkt_cnt;
3342
3343         skb_totallen = 0;
3344         pkt_cnt = 0;
3345         count = 0;
3346         length = 0;
3347         for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
3348                 if (skb_is_gso(skb)) {
3349                         if (pkt_cnt) {
3350                                 /* handle previous packets first */
3351                                 break;
3352                         }
3353                         count = 1;
3354                         length = skb->len - TX_OVERHEAD;
3355                         skb2 = skb_dequeue(tqp);
3356                         goto gso_skb;
3357                 }
3358
3359                 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3360                         break;
3361                 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3362                 pkt_cnt++;
3363         }
3364
3365         /* copy to a single skb */
3366         skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3367         if (!skb)
3368                 goto drop;
3369
3370         skb_put(skb, skb_totallen);
3371
3372         for (count = pos = 0; count < pkt_cnt; count++) {
3373                 skb2 = skb_dequeue(tqp);
3374                 if (skb2) {
3375                         length += (skb2->len - TX_OVERHEAD);
3376                         memcpy(skb->data + pos, skb2->data, skb2->len);
3377                         pos += roundup(skb2->len, sizeof(u32));
3378                         dev_kfree_skb(skb2);
3379                 }
3380         }
3381
3382 gso_skb:
3383         urb = usb_alloc_urb(0, GFP_ATOMIC);
3384         if (!urb)
3385                 goto drop;
3386
3387         entry = (struct skb_data *)skb->cb;
3388         entry->urb = urb;
3389         entry->dev = dev;
3390         entry->length = length;
3391         entry->num_of_packet = count;
3392
3393         spin_lock_irqsave(&dev->txq.lock, flags);
3394         ret = usb_autopm_get_interface_async(dev->intf);
3395         if (ret < 0) {
3396                 spin_unlock_irqrestore(&dev->txq.lock, flags);
3397                 goto drop;
3398         }
3399
3400         usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3401                           skb->data, skb->len, tx_complete, skb);
3402
3403         if (length % dev->maxpacket == 0) {
3404                 /* send USB_ZERO_PACKET */
3405                 urb->transfer_flags |= URB_ZERO_PACKET;
3406         }
3407
3408 #ifdef CONFIG_PM
3409         /* if this triggers the device is still a sleep */
3410         if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3411                 /* transmission will be done in resume */
3412                 usb_anchor_urb(urb, &dev->deferred);
3413                 /* no use to process more packets */
3414                 netif_stop_queue(dev->net);
3415                 usb_put_urb(urb);
3416                 spin_unlock_irqrestore(&dev->txq.lock, flags);
3417                 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3418                 return;
3419         }
3420 #endif
3421
3422         ret = usb_submit_urb(urb, GFP_ATOMIC);
3423         switch (ret) {
3424         case 0:
3425                 netif_trans_update(dev->net);
3426                 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3427                 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3428                         netif_stop_queue(dev->net);
3429                 break;
3430         case -EPIPE:
3431                 netif_stop_queue(dev->net);
3432                 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3433                 usb_autopm_put_interface_async(dev->intf);
3434                 break;
3435         default:
3436                 usb_autopm_put_interface_async(dev->intf);
3437                 netif_dbg(dev, tx_err, dev->net,
3438                           "tx: submit urb err %d\n", ret);
3439                 break;
3440         }
3441
3442         spin_unlock_irqrestore(&dev->txq.lock, flags);
3443
3444         if (ret) {
3445                 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3446 drop:
3447                 dev->net->stats.tx_dropped++;
3448                 if (skb)
3449                         dev_kfree_skb_any(skb);
3450                 usb_free_urb(urb);
3451         } else
3452                 netif_dbg(dev, tx_queued, dev->net,
3453                           "> tx, len %d, type 0x%x\n", length, skb->protocol);
3454 }
3455
3456 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3457 {
3458         struct urb *urb;
3459         int i;
3460
3461         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3462                 for (i = 0; i < 10; i++) {
3463                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3464                                 break;
3465                         urb = usb_alloc_urb(0, GFP_ATOMIC);
3466                         if (urb)
3467                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3468                                         return;
3469                 }
3470
3471                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3472                         tasklet_schedule(&dev->bh);
3473         }
3474         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3475                 netif_wake_queue(dev->net);
3476 }
3477
3478 static void lan78xx_bh(unsigned long param)
3479 {
3480         struct lan78xx_net *dev = (struct lan78xx_net *)param;
3481         struct sk_buff *skb;
3482         struct skb_data *entry;
3483
3484         while ((skb = skb_dequeue(&dev->done))) {
3485                 entry = (struct skb_data *)(skb->cb);
3486                 switch (entry->state) {
3487                 case rx_done:
3488                         entry->state = rx_cleanup;
3489                         rx_process(dev, skb);
3490                         continue;
3491                 case tx_done:
3492                         usb_free_urb(entry->urb);
3493                         dev_kfree_skb(skb);
3494                         continue;
3495                 case rx_cleanup:
3496                         usb_free_urb(entry->urb);
3497                         dev_kfree_skb(skb);
3498                         continue;
3499                 default:
3500                         netdev_dbg(dev->net, "skb state %d\n", entry->state);
3501                         return;
3502                 }
3503         }
3504
3505         if (netif_device_present(dev->net) && netif_running(dev->net)) {
3506                 /* reset update timer delta */
3507                 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3508                         dev->delta = 1;
3509                         mod_timer(&dev->stat_monitor,
3510                                   jiffies + STAT_UPDATE_TIMER);
3511                 }
3512
3513                 if (!skb_queue_empty(&dev->txq_pend))
3514                         lan78xx_tx_bh(dev);
3515
3516                 if (!timer_pending(&dev->delay) &&
3517                     !test_bit(EVENT_RX_HALT, &dev->flags))
3518                         lan78xx_rx_bh(dev);
3519         }
3520 }
3521
3522 static void lan78xx_delayedwork(struct work_struct *work)
3523 {
3524         int status;
3525         struct lan78xx_net *dev;
3526
3527         dev = container_of(work, struct lan78xx_net, wq.work);
3528
3529         if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3530                 unlink_urbs(dev, &dev->txq);
3531                 status = usb_autopm_get_interface(dev->intf);
3532                 if (status < 0)
3533                         goto fail_pipe;
3534                 status = usb_clear_halt(dev->udev, dev->pipe_out);
3535                 usb_autopm_put_interface(dev->intf);
3536                 if (status < 0 &&
3537                     status != -EPIPE &&
3538                     status != -ESHUTDOWN) {
3539                         if (netif_msg_tx_err(dev))
3540 fail_pipe:
3541                                 netdev_err(dev->net,
3542                                            "can't clear tx halt, status %d\n",
3543                                            status);
3544                 } else {
3545                         clear_bit(EVENT_TX_HALT, &dev->flags);
3546                         if (status != -ESHUTDOWN)
3547                                 netif_wake_queue(dev->net);
3548                 }
3549         }
3550         if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3551                 unlink_urbs(dev, &dev->rxq);
3552                 status = usb_autopm_get_interface(dev->intf);
3553                 if (status < 0)
3554                                 goto fail_halt;
3555                 status = usb_clear_halt(dev->udev, dev->pipe_in);
3556                 usb_autopm_put_interface(dev->intf);
3557                 if (status < 0 &&
3558                     status != -EPIPE &&
3559                     status != -ESHUTDOWN) {
3560                         if (netif_msg_rx_err(dev))
3561 fail_halt:
3562                                 netdev_err(dev->net,
3563                                            "can't clear rx halt, status %d\n",
3564                                            status);
3565                 } else {
3566                         clear_bit(EVENT_RX_HALT, &dev->flags);
3567                         tasklet_schedule(&dev->bh);
3568                 }
3569         }
3570
3571         if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3572                 int ret = 0;
3573
3574                 clear_bit(EVENT_LINK_RESET, &dev->flags);
3575                 status = usb_autopm_get_interface(dev->intf);
3576                 if (status < 0)
3577                         goto skip_reset;
3578                 if (lan78xx_link_reset(dev) < 0) {
3579                         usb_autopm_put_interface(dev->intf);
3580 skip_reset:
3581                         netdev_info(dev->net, "link reset failed (%d)\n",
3582                                     ret);
3583                 } else {
3584                         usb_autopm_put_interface(dev->intf);
3585                 }
3586         }
3587
3588         if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3589                 lan78xx_update_stats(dev);
3590
3591                 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3592
3593                 mod_timer(&dev->stat_monitor,
3594                           jiffies + (STAT_UPDATE_TIMER * dev->delta));
3595
3596                 dev->delta = min((dev->delta * 2), 50);
3597         }
3598 }
3599
3600 static void intr_complete(struct urb *urb)
3601 {
3602         struct lan78xx_net *dev = urb->context;
3603         int status = urb->status;
3604
3605         switch (status) {
3606         /* success */
3607         case 0:
3608                 lan78xx_status(dev, urb);
3609                 break;
3610
3611         /* software-driven interface shutdown */
3612         case -ENOENT:                   /* urb killed */
3613         case -ESHUTDOWN:                /* hardware gone */
3614                 netif_dbg(dev, ifdown, dev->net,
3615                           "intr shutdown, code %d\n", status);
3616                 return;
3617
3618         /* NOTE:  not throttling like RX/TX, since this endpoint
3619          * already polls infrequently
3620          */
3621         default:
3622                 netdev_dbg(dev->net, "intr status %d\n", status);
3623                 break;
3624         }
3625
3626         if (!netif_running(dev->net))
3627                 return;
3628
3629         memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3630         status = usb_submit_urb(urb, GFP_ATOMIC);
3631         if (status != 0)
3632                 netif_err(dev, timer, dev->net,
3633                           "intr resubmit --> %d\n", status);
3634 }
3635
3636 static void lan78xx_disconnect(struct usb_interface *intf)
3637 {
3638         struct lan78xx_net              *dev;
3639         struct usb_device               *udev;
3640         struct net_device               *net;
3641         struct phy_device               *phydev;
3642
3643         dev = usb_get_intfdata(intf);
3644         usb_set_intfdata(intf, NULL);
3645         if (!dev)
3646                 return;
3647
3648         udev = interface_to_usbdev(intf);
3649         net = dev->net;
3650         phydev = net->phydev;
3651
3652         phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3653         phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3654
3655         phy_disconnect(net->phydev);
3656
3657         if (phy_is_pseudo_fixed_link(phydev))
3658                 fixed_phy_unregister(phydev);
3659
3660         unregister_netdev(net);
3661
3662         cancel_delayed_work_sync(&dev->wq);
3663
3664         usb_scuttle_anchored_urbs(&dev->deferred);
3665
3666         lan78xx_unbind(dev, intf);
3667
3668         usb_kill_urb(dev->urb_intr);
3669         usb_free_urb(dev->urb_intr);
3670
3671         free_netdev(net);
3672         usb_put_dev(udev);
3673 }
3674
3675 static void lan78xx_tx_timeout(struct net_device *net)
3676 {
3677         struct lan78xx_net *dev = netdev_priv(net);
3678
3679         unlink_urbs(dev, &dev->txq);
3680         tasklet_schedule(&dev->bh);
3681 }
3682
3683 static const struct net_device_ops lan78xx_netdev_ops = {
3684         .ndo_open               = lan78xx_open,
3685         .ndo_stop               = lan78xx_stop,
3686         .ndo_start_xmit         = lan78xx_start_xmit,
3687         .ndo_tx_timeout         = lan78xx_tx_timeout,
3688         .ndo_change_mtu         = lan78xx_change_mtu,
3689         .ndo_set_mac_address    = lan78xx_set_mac_addr,
3690         .ndo_validate_addr      = eth_validate_addr,
3691         .ndo_do_ioctl           = lan78xx_ioctl,
3692         .ndo_set_rx_mode        = lan78xx_set_multicast,
3693         .ndo_set_features       = lan78xx_set_features,
3694         .ndo_vlan_rx_add_vid    = lan78xx_vlan_rx_add_vid,
3695         .ndo_vlan_rx_kill_vid   = lan78xx_vlan_rx_kill_vid,
3696 };
3697
3698 static void lan78xx_stat_monitor(struct timer_list *t)
3699 {
3700         struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3701
3702         lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3703 }
3704
3705 static int lan78xx_probe(struct usb_interface *intf,
3706                          const struct usb_device_id *id)
3707 {
3708         struct lan78xx_net *dev;
3709         struct net_device *netdev;
3710         struct usb_device *udev;
3711         int ret;
3712         unsigned maxp;
3713         unsigned period;
3714         u8 *buf = NULL;
3715
3716         udev = interface_to_usbdev(intf);
3717         udev = usb_get_dev(udev);
3718
3719         netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3720         if (!netdev) {
3721                 dev_err(&intf->dev, "Error: OOM\n");
3722                 ret = -ENOMEM;
3723                 goto out1;
3724         }
3725
3726         /* netdev_printk() needs this */
3727         SET_NETDEV_DEV(netdev, &intf->dev);
3728
3729         dev = netdev_priv(netdev);
3730         dev->udev = udev;
3731         dev->intf = intf;
3732         dev->net = netdev;
3733         dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3734                                         | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3735
3736         skb_queue_head_init(&dev->rxq);
3737         skb_queue_head_init(&dev->txq);
3738         skb_queue_head_init(&dev->done);
3739         skb_queue_head_init(&dev->rxq_pause);
3740         skb_queue_head_init(&dev->txq_pend);
3741         mutex_init(&dev->phy_mutex);
3742
3743         tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3744         INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3745         init_usb_anchor(&dev->deferred);
3746
3747         netdev->netdev_ops = &lan78xx_netdev_ops;
3748         netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3749         netdev->ethtool_ops = &lan78xx_ethtool_ops;
3750
3751         dev->delta = 1;
3752         timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3753
3754         mutex_init(&dev->stats.access_lock);
3755
3756         ret = lan78xx_bind(dev, intf);
3757         if (ret < 0)
3758                 goto out2;
3759         strcpy(netdev->name, "eth%d");
3760
3761         if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3762                 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3763
3764         /* MTU range: 68 - 9000 */
3765         netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3766
3767         dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3768         dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3769         dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3770
3771         dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3772         dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3773
3774         dev->pipe_intr = usb_rcvintpipe(dev->udev,
3775                                         dev->ep_intr->desc.bEndpointAddress &
3776                                         USB_ENDPOINT_NUMBER_MASK);
3777         period = dev->ep_intr->desc.bInterval;
3778
3779         maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3780         buf = kmalloc(maxp, GFP_KERNEL);
3781         if (buf) {
3782                 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3783                 if (!dev->urb_intr) {
3784                         ret = -ENOMEM;
3785                         kfree(buf);
3786                         goto out3;
3787                 } else {
3788                         usb_fill_int_urb(dev->urb_intr, dev->udev,
3789                                          dev->pipe_intr, buf, maxp,
3790                                          intr_complete, dev, period);
3791                 }
3792         }
3793
3794         dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3795
3796         /* driver requires remote-wakeup capability during autosuspend. */
3797         intf->needs_remote_wakeup = 1;
3798
3799         ret = register_netdev(netdev);
3800         if (ret != 0) {
3801                 netif_err(dev, probe, netdev, "couldn't register the device\n");
3802                 goto out3;
3803         }
3804
3805         usb_set_intfdata(intf, dev);
3806
3807         ret = device_set_wakeup_enable(&udev->dev, true);
3808
3809          /* Default delay of 2sec has more overhead than advantage.
3810           * Set to 10sec as default.
3811           */
3812         pm_runtime_set_autosuspend_delay(&udev->dev,
3813                                          DEFAULT_AUTOSUSPEND_DELAY);
3814
3815         ret = lan78xx_phy_init(dev);
3816         if (ret < 0)
3817                 goto out4;
3818
3819         return 0;
3820
3821 out4:
3822         unregister_netdev(netdev);
3823 out3:
3824         lan78xx_unbind(dev, intf);
3825 out2:
3826         free_netdev(netdev);
3827 out1:
3828         usb_put_dev(udev);
3829
3830         return ret;
3831 }
3832
3833 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3834 {
3835         const u16 crc16poly = 0x8005;
3836         int i;
3837         u16 bit, crc, msb;
3838         u8 data;
3839
3840         crc = 0xFFFF;
3841         for (i = 0; i < len; i++) {
3842                 data = *buf++;
3843                 for (bit = 0; bit < 8; bit++) {
3844                         msb = crc >> 15;
3845                         crc <<= 1;
3846
3847                         if (msb ^ (u16)(data & 1)) {
3848                                 crc ^= crc16poly;
3849                                 crc |= (u16)0x0001U;
3850                         }
3851                         data >>= 1;
3852                 }
3853         }
3854
3855         return crc;
3856 }
3857
3858 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3859 {
3860         u32 buf;
3861         int ret;
3862         int mask_index;
3863         u16 crc;
3864         u32 temp_wucsr;
3865         u32 temp_pmt_ctl;
3866         const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3867         const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3868         const u8 arp_type[2] = { 0x08, 0x06 };
3869
3870         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3871         buf &= ~MAC_TX_TXEN_;
3872         ret = lan78xx_write_reg(dev, MAC_TX, buf);
3873         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3874         buf &= ~MAC_RX_RXEN_;
3875         ret = lan78xx_write_reg(dev, MAC_RX, buf);
3876
3877         ret = lan78xx_write_reg(dev, WUCSR, 0);
3878         ret = lan78xx_write_reg(dev, WUCSR2, 0);
3879         ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3880
3881         temp_wucsr = 0;
3882
3883         temp_pmt_ctl = 0;
3884         ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3885         temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3886         temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3887
3888         for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3889                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3890
3891         mask_index = 0;
3892         if (wol & WAKE_PHY) {
3893                 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3894
3895                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3896                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3897                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3898         }
3899         if (wol & WAKE_MAGIC) {
3900                 temp_wucsr |= WUCSR_MPEN_;
3901
3902                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3903                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3904                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3905         }
3906         if (wol & WAKE_BCAST) {
3907                 temp_wucsr |= WUCSR_BCST_EN_;
3908
3909                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3910                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3911                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3912         }
3913         if (wol & WAKE_MCAST) {
3914                 temp_wucsr |= WUCSR_WAKE_EN_;
3915
3916                 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3917                 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3918                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3919                                         WUF_CFGX_EN_ |
3920                                         WUF_CFGX_TYPE_MCAST_ |
3921                                         (0 << WUF_CFGX_OFFSET_SHIFT_) |
3922                                         (crc & WUF_CFGX_CRC16_MASK_));
3923
3924                 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3925                 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3926                 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3927                 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3928                 mask_index++;
3929
3930                 /* for IPv6 Multicast */
3931                 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3932                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3933                                         WUF_CFGX_EN_ |
3934                                         WUF_CFGX_TYPE_MCAST_ |
3935                                         (0 << WUF_CFGX_OFFSET_SHIFT_) |
3936                                         (crc & WUF_CFGX_CRC16_MASK_));
3937
3938                 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3939                 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3940                 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3941                 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3942                 mask_index++;
3943
3944                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3945                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3946                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3947         }
3948         if (wol & WAKE_UCAST) {
3949                 temp_wucsr |= WUCSR_PFDA_EN_;
3950
3951                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3952                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3953                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3954         }
3955         if (wol & WAKE_ARP) {
3956                 temp_wucsr |= WUCSR_WAKE_EN_;
3957
3958                 /* set WUF_CFG & WUF_MASK
3959                  * for packettype (offset 12,13) = ARP (0x0806)
3960                  */
3961                 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3962                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3963                                         WUF_CFGX_EN_ |
3964                                         WUF_CFGX_TYPE_ALL_ |
3965                                         (0 << WUF_CFGX_OFFSET_SHIFT_) |
3966                                         (crc & WUF_CFGX_CRC16_MASK_));
3967
3968                 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3969                 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3970                 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3971                 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3972                 mask_index++;
3973
3974                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3975                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3976                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3977         }
3978
3979         ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3980
3981         /* when multiple WOL bits are set */
3982         if (hweight_long((unsigned long)wol) > 1) {
3983                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3984                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3985                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3986         }
3987         ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3988
3989         /* clear WUPS */
3990         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3991         buf |= PMT_CTL_WUPS_MASK_;
3992         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3993
3994         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3995         buf |= MAC_RX_RXEN_;
3996         ret = lan78xx_write_reg(dev, MAC_RX, buf);
3997
3998         return 0;
3999 }
4000
4001 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
4002 {
4003         struct lan78xx_net *dev = usb_get_intfdata(intf);
4004         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
4005         u32 buf;
4006         int ret;
4007         int event;
4008
4009         event = message.event;
4010
4011         if (!dev->suspend_count++) {
4012                 spin_lock_irq(&dev->txq.lock);
4013                 /* don't autosuspend while transmitting */
4014                 if ((skb_queue_len(&dev->txq) ||
4015                      skb_queue_len(&dev->txq_pend)) &&
4016                         PMSG_IS_AUTO(message)) {
4017                         spin_unlock_irq(&dev->txq.lock);
4018                         ret = -EBUSY;
4019                         goto out;
4020                 } else {
4021                         set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4022                         spin_unlock_irq(&dev->txq.lock);
4023                 }
4024
4025                 /* stop TX & RX */
4026                 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4027                 buf &= ~MAC_TX_TXEN_;
4028                 ret = lan78xx_write_reg(dev, MAC_TX, buf);
4029                 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4030                 buf &= ~MAC_RX_RXEN_;
4031                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
4032
4033                 /* empty out the rx and queues */
4034                 netif_device_detach(dev->net);
4035                 lan78xx_terminate_urbs(dev);
4036                 usb_kill_urb(dev->urb_intr);
4037
4038                 /* reattach */
4039                 netif_device_attach(dev->net);
4040         }
4041
4042         if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4043                 del_timer(&dev->stat_monitor);
4044
4045                 if (PMSG_IS_AUTO(message)) {
4046                         /* auto suspend (selective suspend) */
4047                         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4048                         buf &= ~MAC_TX_TXEN_;
4049                         ret = lan78xx_write_reg(dev, MAC_TX, buf);
4050                         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4051                         buf &= ~MAC_RX_RXEN_;
4052                         ret = lan78xx_write_reg(dev, MAC_RX, buf);
4053
4054                         ret = lan78xx_write_reg(dev, WUCSR, 0);
4055                         ret = lan78xx_write_reg(dev, WUCSR2, 0);
4056                         ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4057
4058                         /* set goodframe wakeup */
4059                         ret = lan78xx_read_reg(dev, WUCSR, &buf);
4060
4061                         buf |= WUCSR_RFE_WAKE_EN_;
4062                         buf |= WUCSR_STORE_WAKE_;
4063
4064                         ret = lan78xx_write_reg(dev, WUCSR, buf);
4065
4066                         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4067
4068                         buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4069                         buf |= PMT_CTL_RES_CLR_WKP_STS_;
4070
4071                         buf |= PMT_CTL_PHY_WAKE_EN_;
4072                         buf |= PMT_CTL_WOL_EN_;
4073                         buf &= ~PMT_CTL_SUS_MODE_MASK_;
4074                         buf |= PMT_CTL_SUS_MODE_3_;
4075
4076                         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4077
4078                         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4079
4080                         buf |= PMT_CTL_WUPS_MASK_;
4081
4082                         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4083
4084                         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4085                         buf |= MAC_RX_RXEN_;
4086                         ret = lan78xx_write_reg(dev, MAC_RX, buf);
4087                 } else {
4088                         lan78xx_set_suspend(dev, pdata->wol);
4089                 }
4090         }
4091
4092         ret = 0;
4093 out:
4094         return ret;
4095 }
4096
4097 static int lan78xx_resume(struct usb_interface *intf)
4098 {
4099         struct lan78xx_net *dev = usb_get_intfdata(intf);
4100         struct sk_buff *skb;
4101         struct urb *res;
4102         int ret;
4103         u32 buf;
4104
4105         if (!timer_pending(&dev->stat_monitor)) {
4106                 dev->delta = 1;
4107                 mod_timer(&dev->stat_monitor,
4108                           jiffies + STAT_UPDATE_TIMER);
4109         }
4110
4111         if (!--dev->suspend_count) {
4112                 /* resume interrupt URBs */
4113                 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
4114                                 usb_submit_urb(dev->urb_intr, GFP_NOIO);
4115
4116                 spin_lock_irq(&dev->txq.lock);
4117                 while ((res = usb_get_from_anchor(&dev->deferred))) {
4118                         skb = (struct sk_buff *)res->context;
4119                         ret = usb_submit_urb(res, GFP_ATOMIC);
4120                         if (ret < 0) {
4121                                 dev_kfree_skb_any(skb);
4122                                 usb_free_urb(res);
4123                                 usb_autopm_put_interface_async(dev->intf);
4124                         } else {
4125                                 netif_trans_update(dev->net);
4126                                 lan78xx_queue_skb(&dev->txq, skb, tx_start);
4127                         }
4128                 }
4129
4130                 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4131                 spin_unlock_irq(&dev->txq.lock);
4132
4133                 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
4134                         if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
4135                                 netif_start_queue(dev->net);
4136                         tasklet_schedule(&dev->bh);
4137                 }
4138         }
4139
4140         ret = lan78xx_write_reg(dev, WUCSR2, 0);
4141         ret = lan78xx_write_reg(dev, WUCSR, 0);
4142         ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4143
4144         ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4145                                              WUCSR2_ARP_RCD_ |
4146                                              WUCSR2_IPV6_TCPSYN_RCD_ |
4147                                              WUCSR2_IPV4_TCPSYN_RCD_);
4148
4149         ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
4150                                             WUCSR_EEE_RX_WAKE_ |
4151                                             WUCSR_PFDA_FR_ |
4152                                             WUCSR_RFE_WAKE_FR_ |
4153                                             WUCSR_WUFR_ |
4154                                             WUCSR_MPR_ |
4155                                             WUCSR_BCST_FR_);
4156
4157         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4158         buf |= MAC_TX_TXEN_;
4159         ret = lan78xx_write_reg(dev, MAC_TX, buf);
4160
4161         return 0;
4162 }
4163
4164 static int lan78xx_reset_resume(struct usb_interface *intf)
4165 {
4166         struct lan78xx_net *dev = usb_get_intfdata(intf);
4167
4168         lan78xx_reset(dev);
4169
4170         phy_start(dev->net->phydev);
4171
4172         return lan78xx_resume(intf);
4173 }
4174
4175 static const struct usb_device_id products[] = {
4176         {
4177         /* LAN7800 USB Gigabit Ethernet Device */
4178         USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4179         },
4180         {
4181         /* LAN7850 USB Gigabit Ethernet Device */
4182         USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4183         },
4184         {
4185         /* LAN7801 USB Gigabit Ethernet Device */
4186         USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
4187         },
4188         {},
4189 };
4190 MODULE_DEVICE_TABLE(usb, products);
4191
4192 static struct usb_driver lan78xx_driver = {
4193         .name                   = DRIVER_NAME,
4194         .id_table               = products,
4195         .probe                  = lan78xx_probe,
4196         .disconnect             = lan78xx_disconnect,
4197         .suspend                = lan78xx_suspend,
4198         .resume                 = lan78xx_resume,
4199         .reset_resume           = lan78xx_reset_resume,
4200         .supports_autosuspend   = 1,
4201         .disable_hub_initiated_lpm = 1,
4202 };
4203
4204 module_usb_driver(lan78xx_driver);
4205
4206 MODULE_AUTHOR(DRIVER_AUTHOR);
4207 MODULE_DESCRIPTION(DRIVER_DESC);
4208 MODULE_LICENSE("GPL");