c4e748e92db40faed62fe7501ad2453daca95668
[linux-2.6-microblaze.git] / drivers / net / usb / lan78xx.c
1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <net/ip6_checksum.h>
33 #include <linux/microchipphy.h>
34 #include "lan78xx.h"
35
36 #define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
37 #define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
38 #define DRIVER_NAME     "lan78xx"
39 #define DRIVER_VERSION  "1.0.4"
40
41 #define TX_TIMEOUT_JIFFIES              (5 * HZ)
42 #define THROTTLE_JIFFIES                (HZ / 8)
43 #define UNLINK_TIMEOUT_MS               3
44
45 #define RX_MAX_QUEUE_MEMORY             (60 * 1518)
46
47 #define SS_USB_PKT_SIZE                 (1024)
48 #define HS_USB_PKT_SIZE                 (512)
49 #define FS_USB_PKT_SIZE                 (64)
50
51 #define MAX_RX_FIFO_SIZE                (12 * 1024)
52 #define MAX_TX_FIFO_SIZE                (12 * 1024)
53 #define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
54 #define DEFAULT_BULK_IN_DELAY           (0x0800)
55 #define MAX_SINGLE_PACKET_SIZE          (9000)
56 #define DEFAULT_TX_CSUM_ENABLE          (true)
57 #define DEFAULT_RX_CSUM_ENABLE          (true)
58 #define DEFAULT_TSO_CSUM_ENABLE         (true)
59 #define DEFAULT_VLAN_FILTER_ENABLE      (true)
60 #define TX_OVERHEAD                     (8)
61 #define RXW_PADDING                     2
62
63 #define LAN78XX_USB_VENDOR_ID           (0x0424)
64 #define LAN7800_USB_PRODUCT_ID          (0x7800)
65 #define LAN7850_USB_PRODUCT_ID          (0x7850)
66 #define LAN78XX_EEPROM_MAGIC            (0x78A5)
67 #define LAN78XX_OTP_MAGIC               (0x78F3)
68
69 #define MII_READ                        1
70 #define MII_WRITE                       0
71
72 #define EEPROM_INDICATOR                (0xA5)
73 #define EEPROM_MAC_OFFSET               (0x01)
74 #define MAX_EEPROM_SIZE                 512
75 #define OTP_INDICATOR_1                 (0xF3)
76 #define OTP_INDICATOR_2                 (0xF7)
77
78 #define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
79                                          WAKE_MCAST | WAKE_BCAST | \
80                                          WAKE_ARP | WAKE_MAGIC)
81
82 /* USB related defines */
83 #define BULK_IN_PIPE                    1
84 #define BULK_OUT_PIPE                   2
85
86 /* default autosuspend delay (mSec)*/
87 #define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
88
89 /* statistic update interval (mSec) */
90 #define STAT_UPDATE_TIMER               (1 * 1000)
91
/* ethtool statistics names.
 * The order of this table MUST match the field order of
 * struct lan78xx_statstage / lan78xx_statstage64 below: the counters are
 * read from hardware as one flat block and exported positionally.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
141
/* Hardware statistics block as returned by USB_VENDOR_REQUEST_GET_STATS.
 * Field order mirrors the device layout and lan78xx_gstrings[] --
 * lan78xx_read_stats() treats this struct as a flat array of u32, so
 * do not reorder, insert, or remove members.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};

/* 64-bit accumulated view of the same counters (rollover-corrected totals
 * built by lan78xx_update_stats()).  Must stay field-for-field parallel
 * with struct lan78xx_statstage above -- it is indexed as a flat u64
 * array in lockstep with that struct's u32 array.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
241
struct lan78xx_net;

/* Driver-private state hung off lan78xx_net::driver_priv:
 * receive-filter configuration plus the workqueue items that apply it.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;				/* cached RFE_CTL register value */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN];	/* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2];	/* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];	/* VLAN filter table */
	struct mutex dataport_mutex;		/* for dataport access */
	spinlock_t rfe_ctl_lock;		/* for rfe register access */
	struct work_struct set_multicast;	/* deferred multicast-filter update */
	struct work_struct set_vlan;		/* deferred VLAN-filter update */
	u32 wol;				/* Wake-on-LAN option flags */
};

/* Lifecycle of an skb queued by this driver, stored in skb->cb
 * (see struct skb_data below).
 */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};

/* Pairs a USB control request with the device it targets. */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};

/* Bit numbers for deferred-event flags (presumably set in
 * lan78xx_net::flags and serviced from the workqueue/tasklet;
 * the handlers are outside this chunk).
 */
#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9

/* Statistics bookkeeping: last raw snapshot, per-counter rollover
 * counts/maxima, and the 64-bit running totals derived from them.
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;		/* previous raw reading */
	struct lan78xx_statstage	rollover_count;	/* wraps seen per counter */
	struct lan78xx_statstage	rollover_max;	/* max value per counter */
	struct lan78xx_statstage64	curr_stat;	/* accumulated totals */
};
298
/* Per-device state for one LAN78xx USB network adapter. */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;	/* struct lan78xx_priv */

	int			rx_qlen;
	int			tx_qlen;
	struct sk_buff_head	rxq;		/* URBs submitted for RX */
	struct sk_buff_head	txq;		/* URBs submitted for TX */
	struct sk_buff_head	done;		/* completed URBs awaiting bh */
	struct sk_buff_head	rxq_pause;	/* RX held while paused */
	struct sk_buff_head	txq_pend;	/* TX waiting for a free URB */

	struct tasklet_struct	bh;		/* completion bottom half */
	struct delayed_work	wq;		/* deferred EVENT_* handling */

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int			msg_enable;	/* netif_msg_* verbosity mask */

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;		/* EVENT_* bits */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;		/* RX throttle timer */
	struct timer_list	stat_monitor;	/* periodic stats refresh */

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;		/* from ID_REV register */
	u32			chiprev;
	struct mii_bus		*mdiobus;

	int			fc_autoneg;	/* flow-control autoneg enabled */
	u8			fc_request_control;

	int			delta;		/* stat_monitor interval units */
	struct statstage	stats;
};
355
356 /* use ethtool to change the level for any given device */
357 static int msg_level = -1;
358 module_param(msg_level, int, 0);
359 MODULE_PARM_DESC(msg_level, "Override default message level");
360
361 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
362 {
363         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
364         int ret;
365
366         if (!buf)
367                 return -ENOMEM;
368
369         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
370                               USB_VENDOR_REQUEST_READ_REGISTER,
371                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
372                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
373         if (likely(ret >= 0)) {
374                 le32_to_cpus(buf);
375                 *data = *buf;
376         } else {
377                 netdev_warn(dev->net,
378                             "Failed to read register index 0x%08x. ret = %d",
379                             index, ret);
380         }
381
382         kfree(buf);
383
384         return ret;
385 }
386
387 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
388 {
389         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
390         int ret;
391
392         if (!buf)
393                 return -ENOMEM;
394
395         *buf = data;
396         cpu_to_le32s(buf);
397
398         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
399                               USB_VENDOR_REQUEST_WRITE_REGISTER,
400                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
401                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
402         if (unlikely(ret < 0)) {
403                 netdev_warn(dev->net,
404                             "Failed to write register index 0x%08x. ret = %d",
405                             index, ret);
406         }
407
408         kfree(buf);
409
410         return ret;
411 }
412
/* Fetch the full hardware statistics block via a vendor control request
 * and convert each 32-bit counter from little-endian into @data.
 *
 * Returns the usb_control_msg() result: the number of bytes transferred
 * on success (callers treat > 0 as success), a negative errno on transfer
 * failure, or -ENOMEM if the bounce buffer cannot be allocated.
 */
static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	/* bounce buffer: USB transfer buffers must be kmalloc'd, not stack */
	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		/* counters arrive little-endian; swap word by word.
		 * NOTE(review): a short read (0 <= ret < sizeof(*stats)) is
		 * still treated as success and copies uninitialized tail
		 * words into *data -- confirm the device always returns the
		 * full block.
		 */
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = 0x%x", ret);
	}

	kfree(stats);

	return ret;
}
451
/* Detect 32-bit wraparound of one hardware counter: if the fresh reading
 * is below the value saved on the previous poll, the counter must have
 * wrapped, so bump its per-counter rollover count.
 * NOTE(review): expands to a bare { if ... } block (not do{}while(0)) and
 * evaluates its arguments more than once -- only safe as a standalone
 * statement with simple arguments, as used below.
 */
#define check_counter_rollover(struct1, dev_stats, member) {    \
	if (struct1->member < dev_stats.saved.member)           \
		dev_stats.rollover_count.member++;              \
	}

/* Compare the latest raw counters in @stats against the snapshot saved on
 * the previous poll, record any rollovers, then make @stats the new saved
 * snapshot.  Called with dev->stats.access_lock held (see
 * lan78xx_update_stats()).
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	/* remember this reading for the next rollover comparison */
	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
510
511 static void lan78xx_update_stats(struct lan78xx_net *dev)
512 {
513         u32 *p, *count, *max;
514         u64 *data;
515         int i;
516         struct lan78xx_statstage lan78xx_stats;
517
518         if (usb_autopm_get_interface(dev->intf) < 0)
519                 return;
520
521         p = (u32 *)&lan78xx_stats;
522         count = (u32 *)&dev->stats.rollover_count;
523         max = (u32 *)&dev->stats.rollover_max;
524         data = (u64 *)&dev->stats.curr_stat;
525
526         mutex_lock(&dev->stats.access_lock);
527
528         if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
529                 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
530
531         for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
532                 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
533
534         mutex_unlock(&dev->stats.access_lock);
535
536         usb_autopm_put_interface(dev->intf);
537 }
538
539 /* Loop until the read is completed with timeout called with phy_mutex held */
540 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
541 {
542         unsigned long start_time = jiffies;
543         u32 val;
544         int ret;
545
546         do {
547                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
548                 if (unlikely(ret < 0))
549                         return -EIO;
550
551                 if (!(val & MII_ACC_MII_BUSY_))
552                         return 0;
553         } while (!time_after(jiffies, start_time + HZ));
554
555         return -EIO;
556 }
557
558 static inline u32 mii_access(int id, int index, int read)
559 {
560         u32 ret;
561
562         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
563         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
564         if (read)
565                 ret |= MII_ACC_MII_READ_;
566         else
567                 ret |= MII_ACC_MII_WRITE_;
568         ret |= MII_ACC_MII_BUSY_;
569
570         return ret;
571 }
572
573 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
574 {
575         unsigned long start_time = jiffies;
576         u32 val;
577         int ret;
578
579         do {
580                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
581                 if (unlikely(ret < 0))
582                         return -EIO;
583
584                 if (!(val & E2P_CMD_EPC_BUSY_) ||
585                     (val & E2P_CMD_EPC_TIMEOUT_))
586                         break;
587                 usleep_range(40, 100);
588         } while (!time_after(jiffies, start_time + HZ));
589
590         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
591                 netdev_warn(dev->net, "EEPROM read operation timeout");
592                 return -EIO;
593         }
594
595         return 0;
596 }
597
598 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
599 {
600         unsigned long start_time = jiffies;
601         u32 val;
602         int ret;
603
604         do {
605                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
606                 if (unlikely(ret < 0))
607                         return -EIO;
608
609                 if (!(val & E2P_CMD_EPC_BUSY_))
610                         return 0;
611
612                 usleep_range(40, 100);
613         } while (!time_after(jiffies, start_time + HZ));
614
615         netdev_warn(dev->net, "EEPROM is busy");
616         return -EIO;
617 }
618
619 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
620                                    u32 length, u8 *data)
621 {
622         u32 val;
623         u32 saved;
624         int i, ret;
625         int retval;
626
627         /* depends on chip, some EEPROM pins are muxed with LED function.
628          * disable & restore LED function to access EEPROM.
629          */
630         ret = lan78xx_read_reg(dev, HW_CFG, &val);
631         saved = val;
632         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
633                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
634                 ret = lan78xx_write_reg(dev, HW_CFG, val);
635         }
636
637         retval = lan78xx_eeprom_confirm_not_busy(dev);
638         if (retval)
639                 return retval;
640
641         for (i = 0; i < length; i++) {
642                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
643                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
644                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
645                 if (unlikely(ret < 0)) {
646                         retval = -EIO;
647                         goto exit;
648                 }
649
650                 retval = lan78xx_wait_eeprom(dev);
651                 if (retval < 0)
652                         goto exit;
653
654                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
655                 if (unlikely(ret < 0)) {
656                         retval = -EIO;
657                         goto exit;
658                 }
659
660                 data[i] = val & 0xFF;
661                 offset++;
662         }
663
664         retval = 0;
665 exit:
666         if (dev->chipid == ID_REV_CHIP_ID_7800_)
667                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
668
669         return retval;
670 }
671
672 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
673                                u32 length, u8 *data)
674 {
675         u8 sig;
676         int ret;
677
678         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
679         if ((ret == 0) && (sig == EEPROM_INDICATOR))
680                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
681         else
682                 ret = -EINVAL;
683
684         return ret;
685 }
686
/* Write @length bytes from @data to raw EEPROM starting at @offset:
 * issue one write-enable (EWEN) command, then one data-load + WRITE
 * command pair per byte, waiting for completion after each command.
 * Returns 0 on success or a negative errno; the HW_CFG LED configuration
 * is restored on every path via the 'exit' label.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;	/* NOTE(review): 'ret' unchecked -- 'saved' is stale if this read failed */
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* wait for this byte's write cycle to finish */
		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* restore the original LED configuration on LAN7800 */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
753
/* Read @length bytes from raw OTP memory starting at @offset.
 * Powers the OTP block up first if it is in power-down, then for each
 * byte programs the address registers, issues a READ command, and polls
 * OTP_STATUS until not busy (~1s limit per poll loop).
 * Returns 0 on success, -EIO on a poll timeout.
 * NOTE(review): the individual register read/write results ('ret') are
 * never checked, so a failed transfer can leave 'buf' stale.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* byte address is split across two registers: high bits
		 * in OTP_ADDR1, low bits in OTP_ADDR2
		 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		/* kick off the read and wait for completion */
		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
807
/* Program @length bytes from @data into raw OTP memory at @offset.
 * Powers the OTP block up if needed, selects BYTE program mode, then for
 * each byte programs the address/data registers, issues a program-verify
 * command, and polls OTP_STATUS until not busy (~1s limit per loop).
 * Returns 0 on success, -EIO on a poll timeout.
 * NOTE(review): as in lan78xx_read_raw_otp(), intermediate register
 * access results ('ret') are never checked.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* byte address split across OTP_ADDR1 (high) / OTP_ADDR2 (low) */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		/* program-with-verify command, then GO to start */
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
860
861 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
862                             u32 length, u8 *data)
863 {
864         u8 sig;
865         int ret;
866
867         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
868
869         if (ret == 0) {
870                 if (sig == OTP_INDICATOR_1)
871                         offset = offset;
872                 else if (sig == OTP_INDICATOR_2)
873                         offset += 0x100;
874                 else
875                         ret = -EINVAL;
876                 ret = lan78xx_read_raw_otp(dev, offset, length, data);
877         }
878
879         return ret;
880 }
881
882 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
883 {
884         int i, ret;
885
886         for (i = 0; i < 100; i++) {
887                 u32 dp_sel;
888
889                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
890                 if (unlikely(ret < 0))
891                         return -EIO;
892
893                 if (dp_sel & DP_SEL_DPRDY_)
894                         return 0;
895
896                 usleep_range(40, 100);
897         }
898
899         netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
900
901         return -EIO;
902 }
903
/* Write @length 32-bit words from @buf into the internal RAM selected by
 * @ram_select, starting at word address @addr, through the DP_* data port.
 * Serialized against other data-port users by pdata->dataport_mutex.
 * Returns 0 or a negative errno from the busy-wait helper.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	/* if the device cannot be resumed, silently skip the write */
	if (usb_autopm_get_interface(dev->intf) < 0)
			return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	/* select the target RAM bank, keeping the other DP_SEL bits */
	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		/* each word write must complete before issuing the next */
		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
944
945 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
946                                     int index, u8 addr[ETH_ALEN])
947 {
948         u32     temp;
949
950         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
951                 temp = addr[3];
952                 temp = addr[2] | (temp << 8);
953                 temp = addr[1] | (temp << 8);
954                 temp = addr[0] | (temp << 8);
955                 pdata->pfilter_table[index][1] = temp;
956                 temp = addr[5];
957                 temp = addr[4] | (temp << 8);
958                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
959                 pdata->pfilter_table[index][0] = temp;
960         }
961 }
962
963 /* returns hash bit number for given MAC address */
964 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
965 {
966         return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
967 }
968
/* Work handler: push the shadow multicast state built by
 * lan78xx_set_multicast() (hash table, perfect filters, RFE_CTL) to the
 * hardware. Runs in process context because the register writes sleep.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	/* write the 512-bit multicast hash through the data port */
	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	/* slot 0 is the unicast address; update only 1..NUM_OF_MAF-1.
	 * MAF_HI is cleared first so the entry is invalid while MAF_LO
	 * is being updated.
	 */
	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
993
994 static void lan78xx_set_multicast(struct net_device *netdev)
995 {
996         struct lan78xx_net *dev = netdev_priv(netdev);
997         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
998         unsigned long flags;
999         int i;
1000
1001         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1002
1003         pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1004                             RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1005
1006         for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1007                         pdata->mchash_table[i] = 0;
1008         /* pfilter_table[0] has own HW address */
1009         for (i = 1; i < NUM_OF_MAF; i++) {
1010                         pdata->pfilter_table[i][0] =
1011                         pdata->pfilter_table[i][1] = 0;
1012         }
1013
1014         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1015
1016         if (dev->net->flags & IFF_PROMISC) {
1017                 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1018                 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1019         } else {
1020                 if (dev->net->flags & IFF_ALLMULTI) {
1021                         netif_dbg(dev, drv, dev->net,
1022                                   "receive all multicast enabled");
1023                         pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1024                 }
1025         }
1026
1027         if (netdev_mc_count(dev->net)) {
1028                 struct netdev_hw_addr *ha;
1029                 int i;
1030
1031                 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1032
1033                 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1034
1035                 i = 1;
1036                 netdev_for_each_mc_addr(ha, netdev) {
1037                         /* set first 32 into Perfect Filter */
1038                         if (i < 33) {
1039                                 lan78xx_set_addr_filter(pdata, i, ha->addr);
1040                         } else {
1041                                 u32 bitnum = lan78xx_hash(ha->addr);
1042
1043                                 pdata->mchash_table[bitnum / 32] |=
1044                                                         (1 << (bitnum % 32));
1045                                 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1046                         }
1047                         i++;
1048                 }
1049         }
1050
1051         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1052
1053         /* defer register writes to a sleepable context */
1054         schedule_work(&pdata->set_multicast);
1055 }
1056
/* Program MAC pause (FLOW) and FIFO flow-control thresholds (FCT_FLOW)
 * from the resolved pause capabilities. @lcladv/@rmtadv are the local and
 * link-partner MII advertisement words, used only when flow control is
 * autonegotiated; @duplex is currently unused. Always returns 0.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	/* 0xFFFF: presumably the pause-time field of FLOW — confirm with
	 * the LAN78xx datasheet
	 */
	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	/* FIFO thresholds differ per USB speed; values look like
	 * vendor-tuned watermarks — verify against the datasheet
	 */
	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
1091
/* Handle a PHY interrupt: acknowledge it, re-read link state, and bring
 * the MAC/USB configuration in line with the new link status. On link
 * down the MAC is reset and the stats timer stopped; on link up, USB LPM
 * states are tuned for the negotiated speed, flow control is reprogrammed
 * and the stats timer restarted. Returns 0/positive on success or a
 * negative errno.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret;
	u32 buf;

	/* clear PHY interrupt status */
	ret = phy_read(phydev, LAN88XX_INT_STS);
	if (unlikely(ret < 0))
		return -EIO;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		/* tell phylib the MAC saw link-down */
		phy_mac_interrupt(phydev, 0);

		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		/* re-read (and discard) PHY interrupt status; presumably
		 * clears any event latched during renegotiation — confirm
		 */
		ret = phy_read(phydev, LAN88XX_INT_STS);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);
		/* tell phylib the MAC saw link-up */
		phy_mac_interrupt(phydev, 1);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}
	}

	return ret;
}
1177
1178 /* some work can't be done in tasklets, so we use keventd
1179  *
1180  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
1181  * but tasklet_schedule() doesn't.      hope the failure is rare.
1182  */
1183 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1184 {
1185         set_bit(work, &dev->flags);
1186         if (!schedule_delayed_work(&dev->wq, 0))
1187                 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1188 }
1189
1190 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1191 {
1192         u32 intdata;
1193
1194         if (urb->actual_length != 4) {
1195                 netdev_warn(dev->net,
1196                             "unexpected urb length %d", urb->actual_length);
1197                 return;
1198         }
1199
1200         memcpy(&intdata, urb->transfer_buffer, 4);
1201         le32_to_cpus(&intdata);
1202
1203         if (intdata & INT_ENP_PHY_INT) {
1204                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1205                           lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1206         } else
1207                 netdev_warn(dev->net,
1208                             "unexpected interrupt: 0x%08x\n", intdata);
1209 }
1210
/* ethtool get_eeprom_len: report the maximum EEPROM size regardless of
 * what is actually fitted.
 */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1215
1216 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1217                                       struct ethtool_eeprom *ee, u8 *data)
1218 {
1219         struct lan78xx_net *dev = netdev_priv(netdev);
1220
1221         ee->magic = LAN78XX_EEPROM_MAGIC;
1222
1223         return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1224 }
1225
/* ethtool set_eeprom: the magic selects the backing store (EEPROM vs OTP).
 * Only a full 512-byte image starting at offset 0 whose first byte carries
 * the correct indicator is accepted; anything else is -EINVAL.
 */
static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Allow entire eeprom update only */
	if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
	    (ee->offset == 0) &&
	    (ee->len == 512) &&
	    (data[0] == EEPROM_INDICATOR))
		return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);

	return -EINVAL;
}
1245
1246 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1247                                 u8 *data)
1248 {
1249         if (stringset == ETH_SS_STATS)
1250                 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1251 }
1252
1253 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1254 {
1255         if (sset == ETH_SS_STATS)
1256                 return ARRAY_SIZE(lan78xx_gstrings);
1257         else
1258                 return -EOPNOTSUPP;
1259 }
1260
/* ethtool get_ethtool_stats: refresh hardware counters, then snapshot the
 * current stats under the access lock so the copy is consistent.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1272
1273 static void lan78xx_get_wol(struct net_device *netdev,
1274                             struct ethtool_wolinfo *wol)
1275 {
1276         struct lan78xx_net *dev = netdev_priv(netdev);
1277         int ret;
1278         u32 buf;
1279         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1280
1281         if (usb_autopm_get_interface(dev->intf) < 0)
1282                         return;
1283
1284         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1285         if (unlikely(ret < 0)) {
1286                 wol->supported = 0;
1287                 wol->wolopts = 0;
1288         } else {
1289                 if (buf & USB_CFG_RMT_WKP_) {
1290                         wol->supported = WAKE_ALL;
1291                         wol->wolopts = pdata->wol;
1292                 } else {
1293                         wol->supported = 0;
1294                         wol->wolopts = 0;
1295                 }
1296         }
1297
1298         usb_autopm_put_interface(dev->intf);
1299 }
1300
1301 static int lan78xx_set_wol(struct net_device *netdev,
1302                            struct ethtool_wolinfo *wol)
1303 {
1304         struct lan78xx_net *dev = netdev_priv(netdev);
1305         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1306         int ret;
1307
1308         ret = usb_autopm_get_interface(dev->intf);
1309         if (ret < 0)
1310                 return ret;
1311
1312         pdata->wol = 0;
1313         if (wol->wolopts & WAKE_UCAST)
1314                 pdata->wol |= WAKE_UCAST;
1315         if (wol->wolopts & WAKE_MCAST)
1316                 pdata->wol |= WAKE_MCAST;
1317         if (wol->wolopts & WAKE_BCAST)
1318                 pdata->wol |= WAKE_BCAST;
1319         if (wol->wolopts & WAKE_MAGIC)
1320                 pdata->wol |= WAKE_MAGIC;
1321         if (wol->wolopts & WAKE_PHY)
1322                 pdata->wol |= WAKE_PHY;
1323         if (wol->wolopts & WAKE_ARP)
1324                 pdata->wol |= WAKE_ARP;
1325
1326         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1327
1328         phy_ethtool_set_wol(netdev->phydev, wol);
1329
1330         usb_autopm_put_interface(dev->intf);
1331
1332         return ret;
1333 }
1334
/* ethtool get_eee: combine the PHY's EEE report with the MAC's EEE enable
 * bit and LPI request delay. Returns 0 or a negative errno.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* EEE is active only if both ends advertise a common mode */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1372
1373 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1374 {
1375         struct lan78xx_net *dev = netdev_priv(net);
1376         int ret;
1377         u32 buf;
1378
1379         ret = usb_autopm_get_interface(dev->intf);
1380         if (ret < 0)
1381                 return ret;
1382
1383         if (edata->eee_enabled) {
1384                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1385                 buf |= MAC_CR_EEE_EN_;
1386                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1387
1388                 phy_ethtool_set_eee(net->phydev, edata);
1389
1390                 buf = (u32)edata->tx_lpi_timer;
1391                 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1392         } else {
1393                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1394                 buf &= ~MAC_CR_EEE_EN_;
1395                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1396         }
1397
1398         usb_autopm_put_interface(dev->intf);
1399
1400         return 0;
1401 }
1402
1403 static u32 lan78xx_get_link(struct net_device *net)
1404 {
1405         phy_read_status(net->phydev);
1406
1407         return net->phydev->link;
1408 }
1409
/* ethtool nway_reset: restart autonegotiation on the attached PHY. */
static int lan78xx_nway_reset(struct net_device *net)
{
	return phy_start_aneg(net->phydev);
}
1414
1415 static void lan78xx_get_drvinfo(struct net_device *net,
1416                                 struct ethtool_drvinfo *info)
1417 {
1418         struct lan78xx_net *dev = netdev_priv(net);
1419
1420         strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1421         strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1422         usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1423 }
1424
/* ethtool get_msglevel: return the netif message-enable bitmask. */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1431
/* ethtool set_msglevel: store the netif message-enable bitmask. */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1438
/* Read the LAN88xx PHY's MDI/MDI-X mode-control register, which lives in
 * extended register page 1; the page is switched back to 0 afterwards.
 * Returns the raw register value (or phy_read()'s negative errno).
 */
static int lan78xx_get_mdix_status(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int buf;

	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
	buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);

	return buf;
}
1450
1451 static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1452 {
1453         struct lan78xx_net *dev = netdev_priv(net);
1454         struct phy_device *phydev = net->phydev;
1455         int buf;
1456
1457         if (mdix_ctrl == ETH_TP_MDI) {
1458                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1459                           LAN88XX_EXT_PAGE_SPACE_1);
1460                 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1461                 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1462                 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1463                           buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1464                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1465                           LAN88XX_EXT_PAGE_SPACE_0);
1466         } else if (mdix_ctrl == ETH_TP_MDI_X) {
1467                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1468                           LAN88XX_EXT_PAGE_SPACE_1);
1469                 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1470                 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1471                 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1472                           buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1473                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1474                           LAN88XX_EXT_PAGE_SPACE_0);
1475         } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1476                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1477                           LAN88XX_EXT_PAGE_SPACE_1);
1478                 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1479                 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1480                 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1481                           buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1482                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1483                           LAN88XX_EXT_PAGE_SPACE_0);
1484         }
1485         dev->mdix_ctrl = mdix_ctrl;
1486 }
1487
/* ethtool get_link_ksettings: report PHY link settings and translate the
 * LAN88xx MDI-X mode bits into the generic eth_tp_mdix fields.
 */
static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	int buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_ksettings_get(phydev, cmd);

	buf = lan78xx_get_mdix_status(net);

	buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
	if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
		cmd->base.eth_tp_mdix = ETH_TP_MDI_AUTO;
		cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
		cmd->base.eth_tp_mdix = ETH_TP_MDI;
		cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI;
	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
		cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
		cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_X;
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1520
/* ethtool set_link_ksettings: apply MDI-X control if it changed, hand the
 * speed/duplex/autoneg request to the PHY, and — for forced (non-autoneg)
 * modes — bounce the link so the partner sees the new configuration.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (dev->mdix_ctrl != cmd->base.eth_tp_mdix_ctrl)
		lan78xx_set_mdix_status(net, cmd->base.eth_tp_mdix_ctrl);

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down by briefly enabling PHY loopback —
		 * presumably to make the partner renegotiate; confirm
		 * against the LAN88xx errata/datasheet
		 */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1551
/* ethtool get_pauseparam: report the driver's requested flow-control
 * configuration (not the negotiated result).
 */
static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;

	/* NOTE(review): ecmd is fetched but never read here — looks like
	 * dead code; verify before removing
	 */
	phy_ethtool_ksettings_get(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
}
1569
/* ethtool set_pauseparam: record the requested pause configuration and,
 * when autonegotiation is active, fold the matching pause bits into the
 * PHY advertisement and renegotiate. Pause autoneg cannot be enabled
 * while link autoneg is off (-EINVAL).
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		u32 mii_adv;
		u32 advertising;

		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, ecmd.link_modes.advertising);

		/* replace the pause bits in the advertisement with the
		 * ones corresponding to the new request
		 */
		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

		ethtool_convert_legacy_u32_to_link_mode(
			ecmd.link_modes.advertising, advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
1615
/* ethtool entry points for lan78xx network devices */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link       = lan78xx_get_link,
	.nway_reset     = lan78xx_nway_reset,
	.get_drvinfo    = lan78xx_get_drvinfo,
	.get_msglevel   = lan78xx_get_msglevel,
	.set_msglevel   = lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom     = lan78xx_ethtool_get_eeprom,
	.set_eeprom     = lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings    = lan78xx_get_strings,
	.get_wol        = lan78xx_get_wol,
	.set_wol        = lan78xx_set_wol,
	.get_eee        = lan78xx_get_eee,
	.set_eee        = lan78xx_set_eee,
	.get_pauseparam = lan78xx_get_pause,
	.set_pauseparam = lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
};
1637
1638 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1639 {
1640         if (!netif_running(netdev))
1641                 return -EINVAL;
1642
1643         return phy_mii_ioctl(netdev->phydev, rq, cmd);
1644 }
1645
1646 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1647 {
1648         u32 addr_lo, addr_hi;
1649         int ret;
1650         u8 addr[6];
1651
1652         ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1653         ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1654
1655         addr[0] = addr_lo & 0xFF;
1656         addr[1] = (addr_lo >> 8) & 0xFF;
1657         addr[2] = (addr_lo >> 16) & 0xFF;
1658         addr[3] = (addr_lo >> 24) & 0xFF;
1659         addr[4] = addr_hi & 0xFF;
1660         addr[5] = (addr_hi >> 8) & 0xFF;
1661
1662         if (!is_valid_ether_addr(addr)) {
1663                 /* reading mac address from EEPROM or OTP */
1664                 if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1665                                          addr) == 0) ||
1666                     (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1667                                       addr) == 0)) {
1668                         if (is_valid_ether_addr(addr)) {
1669                                 /* eeprom values are valid so use them */
1670                                 netif_dbg(dev, ifup, dev->net,
1671                                           "MAC address read from EEPROM");
1672                         } else {
1673                                 /* generate random MAC */
1674                                 random_ether_addr(addr);
1675                                 netif_dbg(dev, ifup, dev->net,
1676                                           "MAC address set to random addr");
1677                         }
1678
1679                         addr_lo = addr[0] | (addr[1] << 8) |
1680                                   (addr[2] << 16) | (addr[3] << 24);
1681                         addr_hi = addr[4] | (addr[5] << 8);
1682
1683                         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1684                         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1685                 } else {
1686                         /* generate random MAC */
1687                         random_ether_addr(addr);
1688                         netif_dbg(dev, ifup, dev->net,
1689                                   "MAC address set to random addr");
1690                 }
1691         }
1692
1693         ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1694         ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1695
1696         ether_addr_copy(dev->net->dev_addr, addr);
1697 }
1698
1699 /* MDIO read and write wrappers for phylib */
1700 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1701 {
1702         struct lan78xx_net *dev = bus->priv;
1703         u32 val, addr;
1704         int ret;
1705
1706         ret = usb_autopm_get_interface(dev->intf);
1707         if (ret < 0)
1708                 return ret;
1709
1710         mutex_lock(&dev->phy_mutex);
1711
1712         /* confirm MII not busy */
1713         ret = lan78xx_phy_wait_not_busy(dev);
1714         if (ret < 0)
1715                 goto done;
1716
1717         /* set the address, index & direction (read from PHY) */
1718         addr = mii_access(phy_id, idx, MII_READ);
1719         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1720
1721         ret = lan78xx_phy_wait_not_busy(dev);
1722         if (ret < 0)
1723                 goto done;
1724
1725         ret = lan78xx_read_reg(dev, MII_DATA, &val);
1726
1727         ret = (int)(val & 0xFFFF);
1728
1729 done:
1730         mutex_unlock(&dev->phy_mutex);
1731         usb_autopm_put_interface(dev->intf);
1732         return ret;
1733 }
1734
1735 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1736                                  u16 regval)
1737 {
1738         struct lan78xx_net *dev = bus->priv;
1739         u32 val, addr;
1740         int ret;
1741
1742         ret = usb_autopm_get_interface(dev->intf);
1743         if (ret < 0)
1744                 return ret;
1745
1746         mutex_lock(&dev->phy_mutex);
1747
1748         /* confirm MII not busy */
1749         ret = lan78xx_phy_wait_not_busy(dev);
1750         if (ret < 0)
1751                 goto done;
1752
1753         val = (u32)regval;
1754         ret = lan78xx_write_reg(dev, MII_DATA, val);
1755
1756         /* set the address, index & direction (write to PHY) */
1757         addr = mii_access(phy_id, idx, MII_WRITE);
1758         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1759
1760         ret = lan78xx_phy_wait_not_busy(dev);
1761         if (ret < 0)
1762                 goto done;
1763
1764 done:
1765         mutex_unlock(&dev->phy_mutex);
1766         usb_autopm_put_interface(dev->intf);
1767         return 0;
1768 }
1769
1770 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1771 {
1772         int ret;
1773
1774         dev->mdiobus = mdiobus_alloc();
1775         if (!dev->mdiobus) {
1776                 netdev_err(dev->net, "can't allocate MDIO bus\n");
1777                 return -ENOMEM;
1778         }
1779
1780         dev->mdiobus->priv = (void *)dev;
1781         dev->mdiobus->read = lan78xx_mdiobus_read;
1782         dev->mdiobus->write = lan78xx_mdiobus_write;
1783         dev->mdiobus->name = "lan78xx-mdiobus";
1784
1785         snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1786                  dev->udev->bus->busnum, dev->udev->devnum);
1787
1788         switch (dev->chipid) {
1789         case ID_REV_CHIP_ID_7800_:
1790         case ID_REV_CHIP_ID_7850_:
1791                 /* set to internal PHY id */
1792                 dev->mdiobus->phy_mask = ~(1 << 1);
1793                 break;
1794         }
1795
1796         ret = mdiobus_register(dev->mdiobus);
1797         if (ret) {
1798                 netdev_err(dev->net, "can't register MDIO bus\n");
1799                 goto exit1;
1800         }
1801
1802         netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1803         return 0;
1804 exit1:
1805         mdiobus_free(dev->mdiobus);
1806         return ret;
1807 }
1808
1809 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1810 {
1811         mdiobus_unregister(dev->mdiobus);
1812         mdiobus_free(dev->mdiobus);
1813 }
1814
/* phylib link-change callback.
 *
 * Implements a hardware workaround for forced 100 Mb/s operation:
 * the speed is bounced through 10 Mb/s before being forced back to
 * 100 Mb/s, with the PHY interrupt pin masked so the intermediate
 * transitions are not signalled.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;	/* NOTE(review): phy_write status is never checked */

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
1846
1847 static int lan78xx_phy_init(struct lan78xx_net *dev)
1848 {
1849         int ret;
1850         u32 mii_adv;
1851         struct phy_device *phydev = dev->net->phydev;
1852
1853         phydev = phy_find_first(dev->mdiobus);
1854         if (!phydev) {
1855                 netdev_err(dev->net, "no PHY found\n");
1856                 return -EIO;
1857         }
1858
1859         /* Enable PHY interrupts.
1860          * We handle our own interrupt
1861          */
1862         ret = phy_read(phydev, LAN88XX_INT_STS);
1863         ret = phy_write(phydev, LAN88XX_INT_MASK,
1864                         LAN88XX_INT_MASK_MDINTPIN_EN_ |
1865                         LAN88XX_INT_MASK_LINK_CHANGE_);
1866
1867         phydev->irq = PHY_IGNORE_INTERRUPT;
1868
1869         ret = phy_connect_direct(dev->net, phydev,
1870                                  lan78xx_link_status_change,
1871                                  PHY_INTERFACE_MODE_GMII);
1872         if (ret) {
1873                 netdev_err(dev->net, "can't attach PHY to %s\n",
1874                            dev->mdiobus->id);
1875                 return -EIO;
1876         }
1877
1878         /* set to AUTOMDIX */
1879         lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
1880
1881         /* MAC doesn't support 1000T Half */
1882         phydev->supported &= ~SUPPORTED_1000baseT_Half;
1883
1884         /* support both flow controls */
1885         dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
1886         phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1887         mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1888         phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1889
1890         genphy_config_aneg(phydev);
1891
1892         dev->fc_autoneg = phydev->autoneg;
1893
1894         phy_start(phydev);
1895
1896         netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
1897
1898         return 0;
1899 }
1900
1901 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
1902 {
1903         int ret = 0;
1904         u32 buf;
1905         bool rxenabled;
1906
1907         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1908
1909         rxenabled = ((buf & MAC_RX_RXEN_) != 0);
1910
1911         if (rxenabled) {
1912                 buf &= ~MAC_RX_RXEN_;
1913                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1914         }
1915
1916         /* add 4 to size for FCS */
1917         buf &= ~MAC_RX_MAX_SIZE_MASK_;
1918         buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1919
1920         ret = lan78xx_write_reg(dev, MAC_RX, buf);
1921
1922         if (rxenabled) {
1923                 buf |= MAC_RX_RXEN_;
1924                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1925         }
1926
1927         return 0;
1928 }
1929
/* Asynchronously unlink every URB on @q not already being unlinked.
 * Returns the number of URBs for which usb_unlink_urb() was issued;
 * their completion handlers will move the skbs to dev->done.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* rescan from the head each pass: the lock is dropped
		 * while unlinking, so the queue may have changed
		 */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
1974
/* ndo_change_mtu: program the new maximum RX frame size into the MAC
 * and, when the RX URB size tracked the old hard MTU, grow it to match
 * and recycle any in-flight RX URBs that are now too small.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;	/* NOTE(review): register write status is discarded */

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			/* outstanding RX URBs were sized for the old MTU;
			 * unlink them and let the tasklet resubmit
			 */
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
2004
2005 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2006 {
2007         struct lan78xx_net *dev = netdev_priv(netdev);
2008         struct sockaddr *addr = p;
2009         u32 addr_lo, addr_hi;
2010         int ret;
2011
2012         if (netif_running(netdev))
2013                 return -EBUSY;
2014
2015         if (!is_valid_ether_addr(addr->sa_data))
2016                 return -EADDRNOTAVAIL;
2017
2018         ether_addr_copy(netdev->dev_addr, addr->sa_data);
2019
2020         addr_lo = netdev->dev_addr[0] |
2021                   netdev->dev_addr[1] << 8 |
2022                   netdev->dev_addr[2] << 16 |
2023                   netdev->dev_addr[3] << 24;
2024         addr_hi = netdev->dev_addr[4] |
2025                   netdev->dev_addr[5] << 8;
2026
2027         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2028         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2029
2030         return 0;
2031 }
2032
2033 /* Enable or disable Rx checksum offload engine */
2034 static int lan78xx_set_features(struct net_device *netdev,
2035                                 netdev_features_t features)
2036 {
2037         struct lan78xx_net *dev = netdev_priv(netdev);
2038         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2039         unsigned long flags;
2040         int ret;
2041
2042         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2043
2044         if (features & NETIF_F_RXCSUM) {
2045                 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2046                 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2047         } else {
2048                 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2049                 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2050         }
2051
2052         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2053                 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2054         else
2055                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2056
2057         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2058
2059         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2060
2061         return 0;
2062 }
2063
2064 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2065 {
2066         struct lan78xx_priv *pdata =
2067                         container_of(param, struct lan78xx_priv, set_vlan);
2068         struct lan78xx_net *dev = pdata->dev;
2069
2070         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2071                                DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2072 }
2073
2074 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2075                                    __be16 proto, u16 vid)
2076 {
2077         struct lan78xx_net *dev = netdev_priv(netdev);
2078         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2079         u16 vid_bit_index;
2080         u16 vid_dword_index;
2081
2082         vid_dword_index = (vid >> 5) & 0x7F;
2083         vid_bit_index = vid & 0x1F;
2084
2085         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2086
2087         /* defer register writes to a sleepable context */
2088         schedule_work(&pdata->set_vlan);
2089
2090         return 0;
2091 }
2092
2093 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2094                                     __be16 proto, u16 vid)
2095 {
2096         struct lan78xx_net *dev = netdev_priv(netdev);
2097         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2098         u16 vid_bit_index;
2099         u16 vid_dword_index;
2100
2101         vid_dword_index = (vid >> 5) & 0x7F;
2102         vid_bit_index = vid & 0x1F;
2103
2104         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2105
2106         /* defer register writes to a sleepable context */
2107         schedule_work(&pdata->set_vlan);
2108
2109         return 0;
2110 }
2111
/* Program the USB Latency Tolerance Messaging (LTM) registers.
 *
 * If LTM is enabled in USB_CFG1, the six register values are loaded via
 * a pointer record at EEPROM/OTP offset 0x3F (temp[0] = record length,
 * which must be 24 bytes; temp[1] = word offset of the data).  On any
 * miss or malformed record, all six registers are programmed to zero;
 * on a raw-read error the hardware is left untouched.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };	/* defaults when no EEPROM/OTP data found */

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;	/* don't write partial data */
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
2150
/* Full hardware (re)initialisation, called from lan78xx_open().
 *
 * Ordered sequence: lite reset -> MAC address -> USB/burst config ->
 * LTM -> FIFO sizes -> receive filter -> PHY reset -> enable the TX
 * path, then the RX path.  Returns 0 on success or -EIO when a reset
 * bit fails to self-clear within ~1 second.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;	/* NOTE(review): most register-access statuses are ignored */
	unsigned long timeout;

	/* issue a "lite" reset and wait for the bit to self-clear */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* burst cap and queue depths scale with the USB link speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		/* full speed; NOTE(review): tx_qlen is not set on this
		 * branch -- confirm whether that is intentional
		 */
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	/* enable burst cap */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear all interrupt status and disable flow control for now */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY and wait for it to come ready again */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	/* let the MAC track the PHY's negotiated speed/duplex */
	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable PHY interrupts */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	buf |= INT_ENP_PHY_INT;
	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);

	/* enable the transmit path: MAC TX then TX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable the receive path: MAC RX then RX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2283
/* Prime the statistics rollover thresholds and start stat collection.
 *
 * Every rollover_max field is first set to the 20-bit counter maximum,
 * then the fields backed by full 32-bit hardware counters are raised to
 * the 32-bit maximum, and the first deferred statistics read is queued.
 */
static void lan78xx_init_stats(struct lan78xx_net *dev)
{
	u32 *p;
	int i;

	/* initialize for stats update
	 * some counters are 20bits and some are 32bits
	 */
	p = (u32 *)&dev->stats.rollover_max;
	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
		p[i] = 0xFFFFF;	/* 20-bit counter maximum */

	/* these counters are 32 bits wide in hardware */
	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
2309
2310 static int lan78xx_open(struct net_device *net)
2311 {
2312         struct lan78xx_net *dev = netdev_priv(net);
2313         int ret;
2314
2315         ret = usb_autopm_get_interface(dev->intf);
2316         if (ret < 0)
2317                 goto out;
2318
2319         ret = lan78xx_reset(dev);
2320         if (ret < 0)
2321                 goto done;
2322
2323         ret = lan78xx_phy_init(dev);
2324         if (ret < 0)
2325                 goto done;
2326
2327         /* for Link Check */
2328         if (dev->urb_intr) {
2329                 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2330                 if (ret < 0) {
2331                         netif_err(dev, ifup, dev->net,
2332                                   "intr submit %d\n", ret);
2333                         goto done;
2334                 }
2335         }
2336
2337         lan78xx_init_stats(dev);
2338
2339         set_bit(EVENT_DEV_OPEN, &dev->flags);
2340
2341         netif_start_queue(net);
2342
2343         dev->link_on = false;
2344
2345         lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2346 done:
2347         usb_autopm_put_interface(dev->intf);
2348
2349 out:
2350         return ret;
2351 }
2352
/* Unlink all outstanding TX/RX URBs and wait for their completion
 * handlers to drain.  Runs in process context (from lan78xx_stop());
 * completion paths wake us through dev->wait.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	/* NOTE(review): with '&&' the loop exits as soon as ANY of the
	 * three queues is empty; waiting while any queue is non-empty
	 * ('||') may have been the intent -- confirm against the
	 * equivalent loop in usbnet.
	 */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
2378
/* ndo_stop: quiesce the device.  Ordering matters: stop the stat timer
 * and detach the PHY first, then mark the device closed, drain the
 * URBs, and finally neutralize deferred work so nothing touches the
 * closed interface.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net		*dev = netdev_priv(net);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	/* detach from the PHY; lan78xx_open() reconnects it */
	phy_stop(net->phydev);
	phy_disconnect(net->phydev);
	net->phydev = NULL;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2416
/* Pull all fragments into the skb's linear buffer (the TX path copies
 * the command words in front of contiguous data).  Returns 0 on
 * success, negative errno from skb_linearize() on failure.
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
2421
2422 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2423                                        struct sk_buff *skb, gfp_t flags)
2424 {
2425         u32 tx_cmd_a, tx_cmd_b;
2426
2427         if (skb_headroom(skb) < TX_OVERHEAD) {
2428                 struct sk_buff *skb2;
2429
2430                 skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
2431                 dev_kfree_skb_any(skb);
2432                 skb = skb2;
2433                 if (!skb)
2434                         return NULL;
2435         }
2436
2437         if (lan78xx_linearize(skb) < 0)
2438                 return NULL;
2439
2440         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2441
2442         if (skb->ip_summed == CHECKSUM_PARTIAL)
2443                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2444
2445         tx_cmd_b = 0;
2446         if (skb_is_gso(skb)) {
2447                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2448
2449                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2450
2451                 tx_cmd_a |= TX_CMD_A_LSO_;
2452         }
2453
2454         if (skb_vlan_tag_present(skb)) {
2455                 tx_cmd_a |= TX_CMD_A_IVTG_;
2456                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2457         }
2458
2459         skb_push(skb, 4);
2460         cpu_to_le32s(&tx_cmd_b);
2461         memcpy(skb->data, &tx_cmd_b, 4);
2462
2463         skb_push(skb, 4);
2464         cpu_to_le32s(&tx_cmd_a);
2465         memcpy(skb->data, &tx_cmd_a, 4);
2466
2467         return skb;
2468 }
2469
/* Move @skb from @list to dev->done, updating its state, and schedule
 * the bottom-half tasklet when the done queue transitions from empty.
 * Called from URB completion context; returns the skb's previous state.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	/* interrupts stay disabled across both critical sections; only
	 * the spinlock is handed over from list->lock to done.lock
	 */
	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2492
/* Bulk-out URB completion handler: account TX statistics, classify any
 * error status, and hand the skb to the bottom half via defer_bh().
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled: clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		/* low-level USB errors: pause transmit */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	/* drop the async runtime-PM reference */
	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2531
2532 static void lan78xx_queue_skb(struct sk_buff_head *list,
2533                               struct sk_buff *newsk, enum skb_state state)
2534 {
2535         struct skb_data *entry = (struct skb_data *)newsk->cb;
2536
2537         __skb_queue_tail(list, newsk);
2538         entry->state = state;
2539 }
2540
2541 static netdev_tx_t
2542 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2543 {
2544         struct lan78xx_net *dev = netdev_priv(net);
2545         struct sk_buff *skb2 = NULL;
2546
2547         if (skb) {
2548                 skb_tx_timestamp(skb);
2549                 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2550         }
2551
2552         if (skb2) {
2553                 skb_queue_tail(&dev->txq_pend, skb2);
2554
2555                 /* throttle TX patch at slower than SUPER SPEED USB */
2556                 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2557                     (skb_queue_len(&dev->txq_pend) > 10))
2558                         netif_stop_queue(net);
2559         } else {
2560                 netif_dbg(dev, tx_err, dev->net,
2561                           "lan78xx_tx_prep return NULL\n");
2562                 dev->net->stats.tx_errors++;
2563                 dev->net->stats.tx_dropped++;
2564         }
2565
2566         tasklet_schedule(&dev->bh);
2567
2568         return NETDEV_TX_OK;
2569 }
2570
/* Scan the interface's altsettings for the bulk-in, bulk-out and
 * (optional) interrupt-in endpoints the device uses.
 *
 * The first altsetting that offers both a bulk-in and a bulk-out
 * endpoint wins; an interrupt-in endpoint found along the way is
 * remembered as the status endpoint in dev->ep_intr.
 *
 * Returns 0 on success (pipes stored in @dev) or -EINVAL when no
 * suitable altsetting exists.
 */
static int
lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only interrupt-IN is of interest */
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				/* isoc/control endpoints are ignored */
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2629
2630 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2631 {
2632         struct lan78xx_priv *pdata = NULL;
2633         int ret;
2634         int i;
2635
2636         ret = lan78xx_get_endpoints(dev, intf);
2637
2638         dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2639
2640         pdata = (struct lan78xx_priv *)(dev->data[0]);
2641         if (!pdata) {
2642                 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2643                 return -ENOMEM;
2644         }
2645
2646         pdata->dev = dev;
2647
2648         spin_lock_init(&pdata->rfe_ctl_lock);
2649         mutex_init(&pdata->dataport_mutex);
2650
2651         INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2652
2653         for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2654                 pdata->vlan_table[i] = 0;
2655
2656         INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2657
2658         dev->net->features = 0;
2659
2660         if (DEFAULT_TX_CSUM_ENABLE)
2661                 dev->net->features |= NETIF_F_HW_CSUM;
2662
2663         if (DEFAULT_RX_CSUM_ENABLE)
2664                 dev->net->features |= NETIF_F_RXCSUM;
2665
2666         if (DEFAULT_TSO_CSUM_ENABLE)
2667                 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2668
2669         dev->net->hw_features = dev->net->features;
2670
2671         /* Init all registers */
2672         ret = lan78xx_reset(dev);
2673
2674         lan78xx_mdio_init(dev);
2675
2676         dev->net->flags |= IFF_MULTICAST;
2677
2678         pdata->wol = WAKE_MAGIC;
2679
2680         return 0;
2681 }
2682
2683 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2684 {
2685         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2686
2687         lan78xx_remove_mdio(dev);
2688
2689         if (pdata) {
2690                 netif_dbg(dev, ifdown, dev->net, "free pdata");
2691                 kfree(pdata);
2692                 pdata = NULL;
2693                 dev->data[0] = 0;
2694         }
2695 }
2696
2697 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2698                                     struct sk_buff *skb,
2699                                     u32 rx_cmd_a, u32 rx_cmd_b)
2700 {
2701         if (!(dev->net->features & NETIF_F_RXCSUM) ||
2702             unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2703                 skb->ip_summed = CHECKSUM_NONE;
2704         } else {
2705                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2706                 skb->ip_summed = CHECKSUM_COMPLETE;
2707         }
2708 }
2709
2710 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2711 {
2712         int             status;
2713
2714         if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2715                 skb_queue_tail(&dev->rxq_pause, skb);
2716                 return;
2717         }
2718
2719         dev->net->stats.rx_packets++;
2720         dev->net->stats.rx_bytes += skb->len;
2721
2722         skb->protocol = eth_type_trans(skb, dev->net);
2723
2724         netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2725                   skb->len + sizeof(struct ethhdr), skb->protocol);
2726         memset(skb->cb, 0, sizeof(struct skb_data));
2727
2728         if (skb_defer_rx_timestamp(skb))
2729                 return;
2730
2731         status = netif_rx(skb);
2732         if (status != NET_RX_SUCCESS)
2733                 netif_dbg(dev, rx_err, dev->net,
2734                           "netif_rx status %d\n", status);
2735 }
2736
/* Split one USB bulk-in buffer into Ethernet frames.
 *
 * Each frame is preceded by three little-endian command words
 * (rx_cmd_a/b/c) and padded to 4-byte alignment.  Every frame except
 * the last is cloned and passed up individually via
 * lan78xx_skb_return(); the final frame is left in @skb for the caller
 * (rx_process) to deliver.
 *
 * Returns 0 on malformed input or clone failure, 1 otherwise.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* pull the three command words off the front */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* hardware reported a receive error; skip frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			/* clone shares the data; point the clone at just
			 * this frame
			 */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
2808
2809 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2810 {
2811         if (!lan78xx_rx(dev, skb)) {
2812                 dev->net->stats.rx_errors++;
2813                 goto done;
2814         }
2815
2816         if (skb->len) {
2817                 lan78xx_skb_return(dev, skb);
2818                 return;
2819         }
2820
2821         netif_dbg(dev, rx_err, dev->net, "drop\n");
2822         dev->net->stats.rx_errors++;
2823 done:
2824         skb_queue_tail(&dev->done, skb);
2825 }
2826
2827 static void rx_complete(struct urb *urb);
2828
/* Allocate a receive skb, attach it to @urb and submit the urb on the
 * bulk-in pipe.
 *
 * On successful submission the skb is queued on dev->rxq in rx_start
 * state.  On any failure the skb and urb are freed here and a negative
 * errno returned; -ENOLINK tells the caller to stop refilling.
 *
 * NOTE(review): @flags is unused — submission always uses GFP_ATOMIC;
 * confirm whether the caller's gfp mask should be honored instead.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* only submit while the interface is present, running and
	 * neither halted nor suspended
	 */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: recover in process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		/* submission failed: free both resources here */
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
2888
/* Completion handler for a bulk-in (RX) URB.
 *
 * Classifies the completion status, moves the skb to the done queue
 * via defer_bh() and, when the urb is still owned here (urb != NULL),
 * resubmits it for the next receive.  Error paths store the urb back
 * into entry->urb and NULL the local pointer, so the bottom half frees
 * it together with the skb (rx_cleanup state).
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* runt frames are counted and dropped */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	/* defer_bh() returns the skb's previous state, so a concurrent
	 * unlink (unlink_start) suppresses the resubmit below
	 */
	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
2958
/* Bottom-half TX path: coalesce pending skbs (or take a single GSO
 * skb) into one bulk-out URB and submit it.
 *
 * Called from the tasklet while dev->txq_pend is non-empty.  Regular
 * skbs already carry their TX command words (added by lan78xx_tx_prep)
 * and are copied back-to-back, each padded to 4-byte alignment, into a
 * freshly allocated skb; a GSO skb is sent on its own.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	count = 0;
	length = 0;
	/* first pass: count how many pending skbs fit in one transfer */
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			count = 1;
			length = skb->len - TX_OVERHEAD;
			/* skb is the queue head here, so the dequeued
			 * skb2 aliases skb, which gso_skb uses below
			 */
			skb2 = skb_dequeue(tqp);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto drop;

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	/* NOTE(review): the ZLP decision is based on 'length' (payload
	 * minus TX_OVERHEAD) rather than the actual transfer size
	 * skb->len — confirm this matches the controller's expectation.
	 */
	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		netif_trans_update(dev->net);
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	/* the 'drop' label sits inside this branch so the earlier gotos
	 * share its accounting/cleanup path
	 */
	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
3083
3084 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3085 {
3086         struct urb *urb;
3087         int i;
3088
3089         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3090                 for (i = 0; i < 10; i++) {
3091                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3092                                 break;
3093                         urb = usb_alloc_urb(0, GFP_ATOMIC);
3094                         if (urb)
3095                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3096                                         return;
3097                 }
3098
3099                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3100                         tasklet_schedule(&dev->bh);
3101         }
3102         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3103                 netif_wake_queue(dev->net);
3104 }
3105
3106 static void lan78xx_bh(unsigned long param)
3107 {
3108         struct lan78xx_net *dev = (struct lan78xx_net *)param;
3109         struct sk_buff *skb;
3110         struct skb_data *entry;
3111
3112         while ((skb = skb_dequeue(&dev->done))) {
3113                 entry = (struct skb_data *)(skb->cb);
3114                 switch (entry->state) {
3115                 case rx_done:
3116                         entry->state = rx_cleanup;
3117                         rx_process(dev, skb);
3118                         continue;
3119                 case tx_done:
3120                         usb_free_urb(entry->urb);
3121                         dev_kfree_skb(skb);
3122                         continue;
3123                 case rx_cleanup:
3124                         usb_free_urb(entry->urb);
3125                         dev_kfree_skb(skb);
3126                         continue;
3127                 default:
3128                         netdev_dbg(dev->net, "skb state %d\n", entry->state);
3129                         return;
3130                 }
3131         }
3132
3133         if (netif_device_present(dev->net) && netif_running(dev->net)) {
3134                 /* reset update timer delta */
3135                 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3136                         dev->delta = 1;
3137                         mod_timer(&dev->stat_monitor,
3138                                   jiffies + STAT_UPDATE_TIMER);
3139                 }
3140
3141                 if (!skb_queue_empty(&dev->txq_pend))
3142                         lan78xx_tx_bh(dev);
3143
3144                 if (!timer_pending(&dev->delay) &&
3145                     !test_bit(EVENT_RX_HALT, &dev->flags))
3146                         lan78xx_rx_bh(dev);
3147         }
3148 }
3149
/* Deferred work handler: recover halted endpoints, perform link resets
 * and refresh statistics, as requested via dev->flags events.
 *
 * Note the unusual labels (fail_pipe/fail_halt/skip_reset) placed
 * inside conditional bodies: jumping to them reuses the error-print
 * path when usb_autopm_get_interface() fails.
 */
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt(dev->udev, dev->pipe_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
fail_pipe:
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			/* halt cleared (or endpoint already gone) */
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
				goto fail_halt;
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
fail_halt:
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			/* restart reception via the bottom half */
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		int ret = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (lan78xx_link_reset(dev) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			/* NOTE(review): ret is always 0 here, so the
			 * message never carries the real failure code.
			 */
			netdev_info(dev->net, "link reset failed (%d)\n",
				    ret);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}

	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
		lan78xx_update_stats(dev);

		clear_bit(EVENT_STAT_UPDATE, &dev->flags);

		/* exponential back-off of the stat timer, capped at 50x */
		mod_timer(&dev->stat_monitor,
			  jiffies + (STAT_UPDATE_TIMER * dev->delta));

		dev->delta = min((dev->delta * 2), 50);
	}
}
3227
3228 static void intr_complete(struct urb *urb)
3229 {
3230         struct lan78xx_net *dev = urb->context;
3231         int status = urb->status;
3232
3233         switch (status) {
3234         /* success */
3235         case 0:
3236                 lan78xx_status(dev, urb);
3237                 break;
3238
3239         /* software-driven interface shutdown */
3240         case -ENOENT:                   /* urb killed */
3241         case -ESHUTDOWN:                /* hardware gone */
3242                 netif_dbg(dev, ifdown, dev->net,
3243                           "intr shutdown, code %d\n", status);
3244                 return;
3245
3246         /* NOTE:  not throttling like RX/TX, since this endpoint
3247          * already polls infrequently
3248          */
3249         default:
3250                 netdev_dbg(dev->net, "intr status %d\n", status);
3251                 break;
3252         }
3253
3254         if (!netif_running(dev->net))
3255                 return;
3256
3257         memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3258         status = usb_submit_urb(urb, GFP_ATOMIC);
3259         if (status != 0)
3260                 netif_err(dev, timer, dev->net,
3261                           "intr resubmit --> %d\n", status);
3262 }
3263
/* USB disconnect callback: tear down in roughly the reverse order of
 * probe.  The netdev is unregistered first so no new I/O starts, then
 * deferred work is cancelled, suspended TX urbs are scuttled, the
 * private data is unbound, and the interrupt urb is killed and freed
 * before releasing the netdev and the usb_device reference.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net		*dev;
	struct usb_device		*udev;
	struct net_device		*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);

	net = dev->net;
	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
3292
/* ndo_tx_timeout handler: cancel all in-flight TX urbs and kick the
 * bottom half to restart transmission.
 */
static void lan78xx_tx_timeout(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}
3300
/* net_device operations for the LAN78xx USB Ethernet interface */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
};
3315
3316 static void lan78xx_stat_monitor(unsigned long param)
3317 {
3318         struct lan78xx_net *dev;
3319
3320         dev = (struct lan78xx_net *)param;
3321
3322         lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3323 }
3324
3325 static int lan78xx_probe(struct usb_interface *intf,
3326                          const struct usb_device_id *id)
3327 {
3328         struct lan78xx_net *dev;
3329         struct net_device *netdev;
3330         struct usb_device *udev;
3331         int ret;
3332         unsigned maxp;
3333         unsigned period;
3334         u8 *buf = NULL;
3335
3336         udev = interface_to_usbdev(intf);
3337         udev = usb_get_dev(udev);
3338
3339         ret = -ENOMEM;
3340         netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3341         if (!netdev) {
3342                         dev_err(&intf->dev, "Error: OOM\n");
3343                         goto out1;
3344         }
3345
3346         /* netdev_printk() needs this */
3347         SET_NETDEV_DEV(netdev, &intf->dev);
3348
3349         dev = netdev_priv(netdev);
3350         dev->udev = udev;
3351         dev->intf = intf;
3352         dev->net = netdev;
3353         dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3354                                         | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3355
3356         skb_queue_head_init(&dev->rxq);
3357         skb_queue_head_init(&dev->txq);
3358         skb_queue_head_init(&dev->done);
3359         skb_queue_head_init(&dev->rxq_pause);
3360         skb_queue_head_init(&dev->txq_pend);
3361         mutex_init(&dev->phy_mutex);
3362
3363         tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3364         INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3365         init_usb_anchor(&dev->deferred);
3366
3367         netdev->netdev_ops = &lan78xx_netdev_ops;
3368         netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3369         netdev->ethtool_ops = &lan78xx_ethtool_ops;
3370
3371         dev->stat_monitor.function = lan78xx_stat_monitor;
3372         dev->stat_monitor.data = (unsigned long)dev;
3373         dev->delta = 1;
3374         init_timer(&dev->stat_monitor);
3375
3376         mutex_init(&dev->stats.access_lock);
3377
3378         ret = lan78xx_bind(dev, intf);
3379         if (ret < 0)
3380                 goto out2;
3381         strcpy(netdev->name, "eth%d");
3382
3383         if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3384                 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3385
3386         /* MTU range: 68 - 9000 */
3387         netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3388
3389         dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3390         dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3391         dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3392
3393         dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3394         dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3395
3396         dev->pipe_intr = usb_rcvintpipe(dev->udev,
3397                                         dev->ep_intr->desc.bEndpointAddress &
3398                                         USB_ENDPOINT_NUMBER_MASK);
3399         period = dev->ep_intr->desc.bInterval;
3400
3401         maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3402         buf = kmalloc(maxp, GFP_KERNEL);
3403         if (buf) {
3404                 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3405                 if (!dev->urb_intr) {
3406                         kfree(buf);
3407                         goto out3;
3408                 } else {
3409                         usb_fill_int_urb(dev->urb_intr, dev->udev,
3410                                          dev->pipe_intr, buf, maxp,
3411                                          intr_complete, dev, period);
3412                 }
3413         }
3414
3415         dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3416
3417         /* driver requires remote-wakeup capability during autosuspend. */
3418         intf->needs_remote_wakeup = 1;
3419
3420         ret = register_netdev(netdev);
3421         if (ret != 0) {
3422                 netif_err(dev, probe, netdev, "couldn't register the device\n");
3423                 goto out2;
3424         }
3425
3426         usb_set_intfdata(intf, dev);
3427
3428         ret = device_set_wakeup_enable(&udev->dev, true);
3429
3430          /* Default delay of 2sec has more overhead than advantage.
3431           * Set to 10sec as default.
3432           */
3433         pm_runtime_set_autosuspend_delay(&udev->dev,
3434                                          DEFAULT_AUTOSUSPEND_DELAY);
3435
3436         return 0;
3437
3438 out3:
3439         lan78xx_unbind(dev, intf);
3440 out2:
3441         free_netdev(netdev);
3442 out1:
3443         usb_put_dev(udev);
3444
3445         return ret;
3446 }
3447
/* Compute the 16-bit CRC the LAN78xx wakeup-frame filter hardware
 * expects for a pattern of @len bytes: polynomial 0x8005, seed 0xFFFF,
 * bytes consumed LSB-first, with bit0 forced high after each reduction
 * (hardware-specific variant -- intentionally not a standard CRC-16).
 */
static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
{
	const u16 poly = 0x8005;
	u16 crc = 0xFFFF;
	int idx;

	for (idx = 0; idx < len; idx++) {
		u8 byte = buf[idx];
		int shift;

		for (shift = 0; shift < 8; shift++) {
			u16 top = crc >> 15;

			crc <<= 1;
			if (top ^ (u16)(byte & 1)) {
				crc ^= poly;
				crc |= (u16)0x0001U;
			}
			byte >>= 1;
		}
	}

	return crc;
}
3472
/* Program the chip's wakeup logic for the Wake-on-LAN bitmask @wol
 * (WAKE_PHY / WAKE_MAGIC / WAKE_BCAST / WAKE_MCAST / WAKE_UCAST /
 * WAKE_ARP) and select the matching suspend mode in PMT_CTL, then
 * re-enable the receiver so wake packets can be seen.
 *
 * Always returns 0.  NOTE(review): every lan78xx_read_reg()/
 * lan78xx_write_reg() result is assigned to @ret but never checked --
 * a USB error here is silently ignored.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;	/* accumulated WUCSR bits, written once at the end */
	u32 temp_pmt_ctl;	/* accumulated PMT_CTL bits, written once at the end */
	/* match prefixes: IPv4/IPv6 multicast MAC OUIs and ARP EtherType */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* disable TX and RX while the wakeup filters are reprogrammed */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear stale wakeup control/status and wake-source state */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* start with all wakeup-frame filter slots disabled */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		/* wake on PHY (link) event */
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		/* wake on magic packet; uses deeper suspend mode 3 */
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		/* wake on any broadcast frame */
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x7: compare the first three bytes (01:00:5E) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3: compare the first two bytes (33:33) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		/* wake on perfect-DA-filtered unicast */
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3000: compare bytes 12 and 13 (the EtherType) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		/* force the shallow suspend mode 0 that serves all sources */
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable the receiver so wakeup frames can be detected */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3615
/* USB suspend callback, for both system sleep and runtime (auto)
 * suspend.  On the first suspend level: refuses autosuspend while TX is
 * pending, stops the MAC, detaches the netdev and kills all URBs.  Then
 * arms the hardware for wakeup -- good-frame wake for autosuspend, or
 * the user's WoL configuration (pdata->wol) for system sleep.
 *
 * Returns 0 on success or -EBUSY when autosuspend is refused.
 * NOTE(review): @event is assigned but never used, and the register
 * read/write results placed in @ret inside the body are not checked.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	event = message.event;

	/* only the first suspend level performs the actual quiescing */
	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
			PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* no statistics polling while asleep */
		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			/* clear stale wakeup state before re-arming */
			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			/* wake on PHY event or WoL, suspend mode 3 */
			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* clear wakeup status bits (write-1-to-clear) */
			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* re-enable RX so wake frames can be received */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			/* system sleep: arm the user's WoL configuration */
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
3711
/* USB resume callback.  Re-arms the statistics timer, and on the last
 * resume level resubmits the interrupt URB and any TX URBs that were
 * anchored on dev->deferred while asleep, then clears all wakeup
 * enables/status and re-enables the transmitter.
 *
 * Always returns 0.  NOTE(review): the usb_submit_urb() result for
 * dev->urb_intr and the register write results in @ret are ignored;
 * a failure here is silent.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	/* restart statistics polling if it is not already pending */
	if (!timer_pending(&dev->stat_monitor)) {
		dev->delta = 1;
		mod_timer(&dev->stat_monitor,
			  jiffies + STAT_UPDATE_TIMER);
	}

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
				usb_submit_urb(dev->urb_intr, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		/* resubmit TX URBs deferred while the device was asleep */
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				/* drop the skb and release the PM reference
				 * taken when the URB was deferred
				 */
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			/* wake the queue unless the in-flight TX ring is full */
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* disarm all wakeup sources and clear recorded wake reasons */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	/* write-1-to-clear the latched wakeup-event status bits */
	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
3778
/* Resume after the device was reset (power session lost): re-run the
 * full chip reset and PHY bring-up, then the normal resume path.
 */
static int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev;

	dev = usb_get_intfdata(intf);
	lan78xx_reset(dev);
	lan78xx_phy_init(dev);

	return lan78xx_resume(intf);
}
3789
/* USB vendor/product IDs this driver binds to */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{},	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, products);
3802
/* USB driver glue: device matching plus power-management callbacks.
 * supports_autosuspend enables runtime PM (see the autosuspend delay
 * set in probe); hub-initiated link power management is disabled.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};
3814
/* generates module init/exit that (un)register lan78xx_driver */
module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");