1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <linux/phy.h>
33 #include <net/ip6_checksum.h>
34 #include <linux/interrupt.h>
35 #include <linux/irqdomain.h>
36 #include <linux/irq.h>
37 #include <linux/irqchip/chained_irq.h>
38 #include <linux/microchipphy.h>
39 #include <linux/phy_fixed.h>
40 #include <linux/of_mdio.h>
41 #include <linux/of_net.h>
42 #include "lan78xx.h"
43
44 #define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
45 #define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
46 #define DRIVER_NAME     "lan78xx"
47
48 #define TX_TIMEOUT_JIFFIES              (5 * HZ)
49 #define THROTTLE_JIFFIES                (HZ / 8)
50 #define UNLINK_TIMEOUT_MS               3
51
52 #define RX_MAX_QUEUE_MEMORY             (60 * 1518)
53
54 #define SS_USB_PKT_SIZE                 (1024)
55 #define HS_USB_PKT_SIZE                 (512)
56 #define FS_USB_PKT_SIZE                 (64)
57
58 #define MAX_RX_FIFO_SIZE                (12 * 1024)
59 #define MAX_TX_FIFO_SIZE                (12 * 1024)
60 #define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
61 #define DEFAULT_BULK_IN_DELAY           (0x0800)
62 #define MAX_SINGLE_PACKET_SIZE          (9000)
63 #define DEFAULT_TX_CSUM_ENABLE          (true)
64 #define DEFAULT_RX_CSUM_ENABLE          (true)
65 #define DEFAULT_TSO_CSUM_ENABLE         (true)
66 #define DEFAULT_VLAN_FILTER_ENABLE      (true)
67 #define TX_OVERHEAD                     (8)
68 #define RXW_PADDING                     2
69
70 #define LAN78XX_USB_VENDOR_ID           (0x0424)
71 #define LAN7800_USB_PRODUCT_ID          (0x7800)
72 #define LAN7850_USB_PRODUCT_ID          (0x7850)
73 #define LAN7801_USB_PRODUCT_ID          (0x7801)
74 #define LAN78XX_EEPROM_MAGIC            (0x78A5)
75 #define LAN78XX_OTP_MAGIC               (0x78F3)
76
77 #define MII_READ                        1
78 #define MII_WRITE                       0
79
80 #define EEPROM_INDICATOR                (0xA5)
81 #define EEPROM_MAC_OFFSET               (0x01)
82 #define MAX_EEPROM_SIZE                 512
83 #define OTP_INDICATOR_1                 (0xF3)
84 #define OTP_INDICATOR_2                 (0xF7)
85
86 #define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
87                                          WAKE_MCAST | WAKE_BCAST | \
88                                          WAKE_ARP | WAKE_MAGIC)
89
90 /* USB related defines */
91 #define BULK_IN_PIPE                    1
92 #define BULK_OUT_PIPE                   2
93
94 /* default autosuspend delay (msec) */
95 #define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
96
97 /* statistics update interval (msec) */
98 #define STAT_UPDATE_TIMER               (1 * 1000)
99
100 /* interrupt source bits reported by the interrupt EP */
101 #define MAX_INT_EP                      (32)
102 #define INT_EP_INTEP                    (31)
103 #define INT_EP_OTP_WR_DONE              (28)
104 #define INT_EP_EEE_TX_LPI_START         (26)
105 #define INT_EP_EEE_TX_LPI_STOP          (25)
106 #define INT_EP_EEE_RX_LPI               (24)
107 #define INT_EP_MAC_RESET_TIMEOUT        (23)
108 #define INT_EP_RDFO                     (22)
109 #define INT_EP_TXE                      (21)
110 #define INT_EP_USB_STATUS               (20)
111 #define INT_EP_TX_DIS                   (19)
112 #define INT_EP_RX_DIS                   (18)
113 #define INT_EP_PHY                      (17)
114 #define INT_EP_DP                       (16)
115 #define INT_EP_MAC_ERR                  (15)
116 #define INT_EP_TDFU                     (14)
117 #define INT_EP_TDFO                     (13)
118 #define INT_EP_UTX                      (12)
119 #define INT_EP_GPIO_11                  (11)
120 #define INT_EP_GPIO_10                  (10)
121 #define INT_EP_GPIO_9                   (9)
122 #define INT_EP_GPIO_8                   (8)
123 #define INT_EP_GPIO_7                   (7)
124 #define INT_EP_GPIO_6                   (6)
125 #define INT_EP_GPIO_5                   (5)
126 #define INT_EP_GPIO_4                   (4)
127 #define INT_EP_GPIO_3                   (3)
128 #define INT_EP_GPIO_2                   (2)
129 #define INT_EP_GPIO_1                   (1)
130 #define INT_EP_GPIO_0                   (0)
131
132 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
133         "RX FCS Errors",
134         "RX Alignment Errors",
135         "RX Fragment Errors",
136         "RX Jabber Errors",
137         "RX Undersize Frame Errors",
138         "RX Oversize Frame Errors",
139         "RX Dropped Frames",
140         "RX Unicast Byte Count",
141         "RX Broadcast Byte Count",
142         "RX Multicast Byte Count",
143         "RX Unicast Frames",
144         "RX Broadcast Frames",
145         "RX Multicast Frames",
146         "RX Pause Frames",
147         "RX 64 Byte Frames",
148         "RX 65 - 127 Byte Frames",
149         "RX 128 - 255 Byte Frames",
150         "RX 256 - 511 Bytes Frames",
151         "RX 512 - 1023 Byte Frames",
152         "RX 1024 - 1518 Byte Frames",
153         "RX Greater 1518 Byte Frames",
154         "EEE RX LPI Transitions",
155         "EEE RX LPI Time",
156         "TX FCS Errors",
157         "TX Excess Deferral Errors",
158         "TX Carrier Errors",
159         "TX Bad Byte Count",
160         "TX Single Collisions",
161         "TX Multiple Collisions",
162         "TX Excessive Collision",
163         "TX Late Collisions",
164         "TX Unicast Byte Count",
165         "TX Broadcast Byte Count",
166         "TX Multicast Byte Count",
167         "TX Unicast Frames",
168         "TX Broadcast Frames",
169         "TX Multicast Frames",
170         "TX Pause Frames",
171         "TX 64 Byte Frames",
172         "TX 65 - 127 Byte Frames",
173         "TX 128 - 255 Byte Frames",
174         "TX 256 - 511 Bytes Frames",
175         "TX 512 - 1023 Byte Frames",
176         "TX 1024 - 1518 Byte Frames",
177         "TX Greater 1518 Byte Frames",
178         "EEE TX LPI Transitions",
179         "EEE TX LPI Time",
180 };
181
182 struct lan78xx_statstage {
183         u32 rx_fcs_errors;
184         u32 rx_alignment_errors;
185         u32 rx_fragment_errors;
186         u32 rx_jabber_errors;
187         u32 rx_undersize_frame_errors;
188         u32 rx_oversize_frame_errors;
189         u32 rx_dropped_frames;
190         u32 rx_unicast_byte_count;
191         u32 rx_broadcast_byte_count;
192         u32 rx_multicast_byte_count;
193         u32 rx_unicast_frames;
194         u32 rx_broadcast_frames;
195         u32 rx_multicast_frames;
196         u32 rx_pause_frames;
197         u32 rx_64_byte_frames;
198         u32 rx_65_127_byte_frames;
199         u32 rx_128_255_byte_frames;
200         u32 rx_256_511_bytes_frames;
201         u32 rx_512_1023_byte_frames;
202         u32 rx_1024_1518_byte_frames;
203         u32 rx_greater_1518_byte_frames;
204         u32 eee_rx_lpi_transitions;
205         u32 eee_rx_lpi_time;
206         u32 tx_fcs_errors;
207         u32 tx_excess_deferral_errors;
208         u32 tx_carrier_errors;
209         u32 tx_bad_byte_count;
210         u32 tx_single_collisions;
211         u32 tx_multiple_collisions;
212         u32 tx_excessive_collision;
213         u32 tx_late_collisions;
214         u32 tx_unicast_byte_count;
215         u32 tx_broadcast_byte_count;
216         u32 tx_multicast_byte_count;
217         u32 tx_unicast_frames;
218         u32 tx_broadcast_frames;
219         u32 tx_multicast_frames;
220         u32 tx_pause_frames;
221         u32 tx_64_byte_frames;
222         u32 tx_65_127_byte_frames;
223         u32 tx_128_255_byte_frames;
224         u32 tx_256_511_bytes_frames;
225         u32 tx_512_1023_byte_frames;
226         u32 tx_1024_1518_byte_frames;
227         u32 tx_greater_1518_byte_frames;
228         u32 eee_tx_lpi_transitions;
229         u32 eee_tx_lpi_time;
230 };
231
232 struct lan78xx_statstage64 {
233         u64 rx_fcs_errors;
234         u64 rx_alignment_errors;
235         u64 rx_fragment_errors;
236         u64 rx_jabber_errors;
237         u64 rx_undersize_frame_errors;
238         u64 rx_oversize_frame_errors;
239         u64 rx_dropped_frames;
240         u64 rx_unicast_byte_count;
241         u64 rx_broadcast_byte_count;
242         u64 rx_multicast_byte_count;
243         u64 rx_unicast_frames;
244         u64 rx_broadcast_frames;
245         u64 rx_multicast_frames;
246         u64 rx_pause_frames;
247         u64 rx_64_byte_frames;
248         u64 rx_65_127_byte_frames;
249         u64 rx_128_255_byte_frames;
250         u64 rx_256_511_bytes_frames;
251         u64 rx_512_1023_byte_frames;
252         u64 rx_1024_1518_byte_frames;
253         u64 rx_greater_1518_byte_frames;
254         u64 eee_rx_lpi_transitions;
255         u64 eee_rx_lpi_time;
256         u64 tx_fcs_errors;
257         u64 tx_excess_deferral_errors;
258         u64 tx_carrier_errors;
259         u64 tx_bad_byte_count;
260         u64 tx_single_collisions;
261         u64 tx_multiple_collisions;
262         u64 tx_excessive_collision;
263         u64 tx_late_collisions;
264         u64 tx_unicast_byte_count;
265         u64 tx_broadcast_byte_count;
266         u64 tx_multicast_byte_count;
267         u64 tx_unicast_frames;
268         u64 tx_broadcast_frames;
269         u64 tx_multicast_frames;
270         u64 tx_pause_frames;
271         u64 tx_64_byte_frames;
272         u64 tx_65_127_byte_frames;
273         u64 tx_128_255_byte_frames;
274         u64 tx_256_511_bytes_frames;
275         u64 tx_512_1023_byte_frames;
276         u64 tx_1024_1518_byte_frames;
277         u64 tx_greater_1518_byte_frames;
278         u64 eee_tx_lpi_transitions;
279         u64 eee_tx_lpi_time;
280 };
281
282 static u32 lan78xx_regs[] = {
283         ID_REV,
284         INT_STS,
285         HW_CFG,
286         PMT_CTL,
287         E2P_CMD,
288         E2P_DATA,
289         USB_STATUS,
290         VLAN_TYPE,
291         MAC_CR,
292         MAC_RX,
293         MAC_TX,
294         FLOW,
295         ERR_STS,
296         MII_ACC,
297         MII_DATA,
298         EEE_TX_LPI_REQ_DLY,
299         EEE_TW_TX_SYS,
300         EEE_TX_LPI_REM_DLY,
301         WUCSR
302 };
303
304 #define PHY_REG_SIZE (32 * sizeof(u32))
305
306 struct lan78xx_net;
307
308 struct lan78xx_priv {
309         struct lan78xx_net *dev;
310         u32 rfe_ctl;
311         u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
312         u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
313         u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
314         struct mutex dataport_mutex; /* for dataport access */
315         spinlock_t rfe_ctl_lock; /* for rfe register access */
316         struct work_struct set_multicast;
317         struct work_struct set_vlan;
318         u32 wol;
319 };
320
321 enum skb_state {
322         illegal = 0,
323         tx_start,
324         tx_done,
325         rx_start,
326         rx_done,
327         rx_cleanup,
328         unlink_start
329 };
330
331 struct skb_data {               /* skb->cb is one of these */
332         struct urb *urb;
333         struct lan78xx_net *dev;
334         enum skb_state state;
335         size_t length;
336         int num_of_packet;
337 };
338
339 struct usb_context {
340         struct usb_ctrlrequest req;
341         struct lan78xx_net *dev;
342 };
343
344 #define EVENT_TX_HALT                   0
345 #define EVENT_RX_HALT                   1
346 #define EVENT_RX_MEMORY                 2
347 #define EVENT_STS_SPLIT                 3
348 #define EVENT_LINK_RESET                4
349 #define EVENT_RX_PAUSED                 5
350 #define EVENT_DEV_WAKING                6
351 #define EVENT_DEV_ASLEEP                7
352 #define EVENT_DEV_OPEN                  8
353 #define EVENT_STAT_UPDATE               9
354
355 struct statstage {
356         struct mutex                    access_lock;    /* for stats access */
357         struct lan78xx_statstage        saved;
358         struct lan78xx_statstage        rollover_count;
359         struct lan78xx_statstage        rollover_max;
360         struct lan78xx_statstage64      curr_stat;
361 };
362
363 struct irq_domain_data {
364         struct irq_domain       *irqdomain;
365         unsigned int            phyirq;
366         struct irq_chip         *irqchip;
367         irq_flow_handler_t      irq_handler;
368         u32                     irqenable;
369         struct mutex            irq_lock;               /* for irq bus access */
370 };
371
372 struct lan78xx_net {
373         struct net_device       *net;
374         struct usb_device       *udev;
375         struct usb_interface    *intf;
376         void                    *driver_priv;
377
378         int                     rx_qlen;
379         int                     tx_qlen;
380         struct sk_buff_head     rxq;
381         struct sk_buff_head     txq;
382         struct sk_buff_head     done;
383         struct sk_buff_head     rxq_pause;
384         struct sk_buff_head     txq_pend;
385
386         struct tasklet_struct   bh;
387         struct delayed_work     wq;
388
389         struct usb_host_endpoint *ep_blkin;
390         struct usb_host_endpoint *ep_blkout;
391         struct usb_host_endpoint *ep_intr;
392
393         int                     msg_enable;
394
395         struct urb              *urb_intr;
396         struct usb_anchor       deferred;
397
398         struct mutex            phy_mutex; /* for phy access */
399         unsigned                pipe_in, pipe_out, pipe_intr;
400
401         u32                     hard_mtu;       /* count any extra framing */
402         size_t                  rx_urb_size;    /* size for rx urbs */
403
404         unsigned long           flags;
405
406         wait_queue_head_t       *wait;
407         unsigned char           suspend_count;
408
409         unsigned                maxpacket;
410         struct timer_list       delay;
411         struct timer_list       stat_monitor;
412
413         unsigned long           data[5];
414
415         int                     link_on;
416         u8                      mdix_ctrl;
417
418         u32                     chipid;
419         u32                     chiprev;
420         struct mii_bus          *mdiobus;
421         phy_interface_t         interface;
422
423         int                     fc_autoneg;
424         u8                      fc_request_control;
425
426         int                     delta;
427         struct statstage        stats;
428
429         struct irq_domain_data  domain_data;
430 };
431
432 /* external PHY IDs */
433 #define PHY_LAN8835                     (0x0007C130)
434 #define PHY_KSZ9031RNX                  (0x00221620)
435
436 /* use ethtool to change the level for any given device */
437 static int msg_level = -1;
438 module_param(msg_level, int, 0);
439 MODULE_PARM_DESC(msg_level, "Override default message level");
440
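/* Register access helpers: each register read/write is a USB vendor control
 * transfer with the register offset in wIndex. The 32-bit value goes through
 * a kmalloc'd bounce buffer because USB transfer buffers must be DMA-capable;
 * an on-stack variable cannot be handed to usb_control_msg().
 */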
441 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
442 {
443         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
444         int ret;
445
446         if (!buf)
447                 return -ENOMEM;
448
449         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
450                               USB_VENDOR_REQUEST_READ_REGISTER,
451                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
452                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
453         if (likely(ret >= 0)) {
454                 le32_to_cpus(buf);
455                 *data = *buf;
456         } else {
457                 netdev_warn(dev->net,
458                             "Failed to read register index 0x%08x. ret = %d",
459                             index, ret);
460         }
461
462         kfree(buf);
463
464         return ret;
465 }
466
467 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
468 {
469         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
470         int ret;
471
472         if (!buf)
473                 return -ENOMEM;
474
475         *buf = data;
476         cpu_to_le32s(buf);
477
478         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
479                               USB_VENDOR_REQUEST_WRITE_REGISTER,
480                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
481                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
482         if (unlikely(ret < 0)) {
483                 netdev_warn(dev->net,
484                             "Failed to write register index 0x%08x. ret = %d",
485                             index, ret);
486         }
487
488         kfree(buf);
489
490         return ret;
491 }
492
493 static int lan78xx_read_stats(struct lan78xx_net *dev,
494                               struct lan78xx_statstage *data)
495 {
496         int ret = 0;
497         int i;
498         struct lan78xx_statstage *stats;
499         u32 *src;
500         u32 *dst;
501
502         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
503         if (!stats)
504                 return -ENOMEM;
505
506         ret = usb_control_msg(dev->udev,
507                               usb_rcvctrlpipe(dev->udev, 0),
508                               USB_VENDOR_REQUEST_GET_STATS,
509                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
510                               0,
511                               0,
512                               (void *)stats,
513                               sizeof(*stats),
514                               USB_CTRL_SET_TIMEOUT);
515         if (likely(ret >= 0)) {
516                 src = (u32 *)stats;
517                 dst = (u32 *)data;
518                 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
519                         le32_to_cpus(&src[i]);
520                         dst[i] = src[i];
521                 }
522         } else {
523                 netdev_warn(dev->net,
524                             "Failed to read stat ret = 0x%x", ret);
525         }
526
527         kfree(stats);
528
529         return ret;
530 }
531
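/* The statistics counters read from the hardware are 32-bit and wrap around.
 * If a freshly read counter is smaller than the value saved from the previous
 * read, the counter rolled over, so bump the corresponding rollover count.
 */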
532 #define check_counter_rollover(struct1, dev_stats, member) {    \
533         if (struct1->member < dev_stats.saved.member)           \
534                 dev_stats.rollover_count.member++;              \
535         }
536
537 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
538                                         struct lan78xx_statstage *stats)
539 {
540         check_counter_rollover(stats, dev->stats, rx_fcs_errors);
541         check_counter_rollover(stats, dev->stats, rx_alignment_errors);
542         check_counter_rollover(stats, dev->stats, rx_fragment_errors);
543         check_counter_rollover(stats, dev->stats, rx_jabber_errors);
544         check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
545         check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
546         check_counter_rollover(stats, dev->stats, rx_dropped_frames);
547         check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
548         check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
549         check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
550         check_counter_rollover(stats, dev->stats, rx_unicast_frames);
551         check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
552         check_counter_rollover(stats, dev->stats, rx_multicast_frames);
553         check_counter_rollover(stats, dev->stats, rx_pause_frames);
554         check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
555         check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
556         check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
557         check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
558         check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
559         check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
560         check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
561         check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
562         check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
563         check_counter_rollover(stats, dev->stats, tx_fcs_errors);
564         check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
565         check_counter_rollover(stats, dev->stats, tx_carrier_errors);
566         check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
567         check_counter_rollover(stats, dev->stats, tx_single_collisions);
568         check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
569         check_counter_rollover(stats, dev->stats, tx_excessive_collision);
570         check_counter_rollover(stats, dev->stats, tx_late_collisions);
571         check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
572         check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
573         check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
574         check_counter_rollover(stats, dev->stats, tx_unicast_frames);
575         check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
576         check_counter_rollover(stats, dev->stats, tx_multicast_frames);
577         check_counter_rollover(stats, dev->stats, tx_pause_frames);
578         check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
579         check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
580         check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
581         check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
582         check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
583         check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
584         check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
585         check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
586         check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
587
588         memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
589 }
590
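/* Fold the latest hardware counters into the 64-bit totals. The stat structs
 * are walked as flat arrays of u32/u64, and each total is computed as
 * raw_counter + rollover_count * (rollover_max + 1).
 */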
591 static void lan78xx_update_stats(struct lan78xx_net *dev)
592 {
593         u32 *p, *count, *max;
594         u64 *data;
595         int i;
596         struct lan78xx_statstage lan78xx_stats;
597
598         if (usb_autopm_get_interface(dev->intf) < 0)
599                 return;
600
601         p = (u32 *)&lan78xx_stats;
602         count = (u32 *)&dev->stats.rollover_count;
603         max = (u32 *)&dev->stats.rollover_max;
604         data = (u64 *)&dev->stats.curr_stat;
605
606         mutex_lock(&dev->stats.access_lock);
607
608         if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
609                 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
610
611         for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
612                 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
613
614         mutex_unlock(&dev->stats.access_lock);
615
616         usb_autopm_put_interface(dev->intf);
617 }
618
619 /* Loop until the read completes, with timeout; called with phy_mutex held */
620 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
621 {
622         unsigned long start_time = jiffies;
623         u32 val;
624         int ret;
625
626         do {
627                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
628                 if (unlikely(ret < 0))
629                         return -EIO;
630
631                 if (!(val & MII_ACC_MII_BUSY_))
632                         return 0;
633         } while (!time_after(jiffies, start_time + HZ));
634
635         return -EIO;
636 }
637
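/* Compose an MII_ACC register value: PHY address, register index, the
 * read/write direction flag and the BUSY bit that starts the transaction.
 */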
638 static inline u32 mii_access(int id, int index, int read)
639 {
640         u32 ret;
641
642         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
643         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
644         if (read)
645                 ret |= MII_ACC_MII_READ_;
646         else
647                 ret |= MII_ACC_MII_WRITE_;
648         ret |= MII_ACC_MII_BUSY_;
649
650         return ret;
651 }
652
653 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
654 {
655         unsigned long start_time = jiffies;
656         u32 val;
657         int ret;
658
659         do {
660                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
661                 if (unlikely(ret < 0))
662                         return -EIO;
663
664                 if (!(val & E2P_CMD_EPC_BUSY_) ||
665                     (val & E2P_CMD_EPC_TIMEOUT_))
666                         break;
667                 usleep_range(40, 100);
668         } while (!time_after(jiffies, start_time + HZ));
669
670         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
671                 netdev_warn(dev->net, "EEPROM read operation timeout");
672                 return -EIO;
673         }
674
675         return 0;
676 }
677
678 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
679 {
680         unsigned long start_time = jiffies;
681         u32 val;
682         int ret;
683
684         do {
685                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
686                 if (unlikely(ret < 0))
687                         return -EIO;
688
689                 if (!(val & E2P_CMD_EPC_BUSY_))
690                         return 0;
691
692                 usleep_range(40, 100);
693         } while (!time_after(jiffies, start_time + HZ));
694
695         netdev_warn(dev->net, "EEPROM is busy");
696         return -EIO;
697 }
698
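/* Read the EEPROM one byte at a time: issue a READ command for the current
 * offset, wait for the controller to go idle, then fetch the byte from
 * E2P_DATA.
 */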
699 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
700                                    u32 length, u8 *data)
701 {
702         u32 val;
703         u32 saved;
704         int i, ret;
705         int retval;
706
707         /* Depending on the chip, some EEPROM pins are muxed with the LED
708          * function; disable the LEDs to access the EEPROM, then restore them.
709          */
710         ret = lan78xx_read_reg(dev, HW_CFG, &val);
711         saved = val;
712         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
713                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
714                 ret = lan78xx_write_reg(dev, HW_CFG, val);
715         }
716
717         retval = lan78xx_eeprom_confirm_not_busy(dev);
718         if (retval)
719                 return retval;
720
721         for (i = 0; i < length; i++) {
722                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
723                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
724                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
725                 if (unlikely(ret < 0)) {
726                         retval = -EIO;
727                         goto exit;
728                 }
729
730                 retval = lan78xx_wait_eeprom(dev);
731                 if (retval < 0)
732                         goto exit;
733
734                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
735                 if (unlikely(ret < 0)) {
736                         retval = -EIO;
737                         goto exit;
738                 }
739
740                 data[i] = val & 0xFF;
741                 offset++;
742         }
743
744         retval = 0;
745 exit:
746         if (dev->chipid == ID_REV_CHIP_ID_7800_)
747                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
748
749         return retval;
750 }
751
752 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
753                                u32 length, u8 *data)
754 {
755         u8 sig;
756         int ret;
757
758         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
759         if ((ret == 0) && (sig == EEPROM_INDICATOR))
760                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
761         else
762                 ret = -EINVAL;
763
764         return ret;
765 }
766
767 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
768                                     u32 length, u8 *data)
769 {
770         u32 val;
771         u32 saved;
772         int i, ret;
773         int retval;
774
775         /* Depending on the chip, some EEPROM pins are muxed with the LED
776          * function; disable the LEDs to access the EEPROM, then restore them.
777          */
778         ret = lan78xx_read_reg(dev, HW_CFG, &val);
779         saved = val;
780         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
781                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
782                 ret = lan78xx_write_reg(dev, HW_CFG, val);
783         }
784
785         retval = lan78xx_eeprom_confirm_not_busy(dev);
786         if (retval)
787                 goto exit;
788
789         /* Issue write/erase enable command */
790         val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
791         ret = lan78xx_write_reg(dev, E2P_CMD, val);
792         if (unlikely(ret < 0)) {
793                 retval = -EIO;
794                 goto exit;
795         }
796
797         retval = lan78xx_wait_eeprom(dev);
798         if (retval < 0)
799                 goto exit;
800
801         for (i = 0; i < length; i++) {
802                 /* Fill data register */
803                 val = data[i];
804                 ret = lan78xx_write_reg(dev, E2P_DATA, val);
805                 if (ret < 0) {
806                         retval = -EIO;
807                         goto exit;
808                 }
809
810                 /* Send "write" command */
811                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
812                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
813                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
814                 if (ret < 0) {
815                         retval = -EIO;
816                         goto exit;
817                 }
818
819                 retval = lan78xx_wait_eeprom(dev);
820                 if (retval < 0)
821                         goto exit;
822
823                 offset++;
824         }
825
826         retval = 0;
827 exit:
828         if (dev->chipid == ID_REV_CHIP_ID_7800_)
829                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
830
831         return retval;
832 }
833
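/* Raw OTP read: take the OTP block out of power-down if needed, then for each
 * byte program the address registers, issue a READ function command plus GO,
 * and poll OTP_STATUS until the controller is no longer busy.
 */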
834 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
835                                 u32 length, u8 *data)
836 {
837         int i;
838         int ret;
839         u32 buf;
840         unsigned long timeout;
841
842         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
843
844         if (buf & OTP_PWR_DN_PWRDN_N_) {
845                 /* clear it and wait to be cleared */
846                 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
847
848                 timeout = jiffies + HZ;
849                 do {
850                         usleep_range(1, 10);
851                         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
852                         if (time_after(jiffies, timeout)) {
853                                 netdev_warn(dev->net,
854                                             "timeout on OTP_PWR_DN");
855                                 return -EIO;
856                         }
857                 } while (buf & OTP_PWR_DN_PWRDN_N_);
858         }
859
860         for (i = 0; i < length; i++) {
861                 ret = lan78xx_write_reg(dev, OTP_ADDR1,
862                                         ((offset + i) >> 8) & OTP_ADDR1_15_11);
863                 ret = lan78xx_write_reg(dev, OTP_ADDR2,
864                                         ((offset + i) & OTP_ADDR2_10_3));
865
866                 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
867                 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
868
869                 timeout = jiffies + HZ;
870                 do {
871                         udelay(1);
872                         ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
873                         if (time_after(jiffies, timeout)) {
874                                 netdev_warn(dev->net,
875                                             "timeout on OTP_STATUS");
876                                 return -EIO;
877                         }
878                 } while (buf & OTP_STATUS_BUSY_);
879
880                 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
881
882                 data[i] = (u8)(buf & 0xFF);
883         }
884
885         return 0;
886 }
887
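/* Raw OTP write: same power-up and per-byte address sequence as the read
 * path, but in BYTE program mode with the data staged through OTP_PRGM_DATA.
 * OTP is one-time programmable, so callers must know what they are doing.
 */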
888 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
889                                  u32 length, u8 *data)
890 {
891         int i;
892         int ret;
893         u32 buf;
894         unsigned long timeout;
895
896         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
897
898         if (buf & OTP_PWR_DN_PWRDN_N_) {
899                 /* clear it and wait to be cleared */
900                 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
901
902                 timeout = jiffies + HZ;
903                 do {
904                         udelay(1);
905                         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
906                         if (time_after(jiffies, timeout)) {
907                                 netdev_warn(dev->net,
908                                             "timeout on OTP_PWR_DN completion");
909                                 return -EIO;
910                         }
911                 } while (buf & OTP_PWR_DN_PWRDN_N_);
912         }
913
914         /* set to BYTE program mode */
915         ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
916
917         for (i = 0; i < length; i++) {
918                 ret = lan78xx_write_reg(dev, OTP_ADDR1,
919                                         ((offset + i) >> 8) & OTP_ADDR1_15_11);
920                 ret = lan78xx_write_reg(dev, OTP_ADDR2,
921                                         ((offset + i) & OTP_ADDR2_10_3));
922                 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
923                 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
924                 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
925
926                 timeout = jiffies + HZ;
927                 do {
928                         udelay(1);
929                         ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
930                         if (time_after(jiffies, timeout)) {
931                                 netdev_warn(dev->net,
932                                             "Timeout on OTP_STATUS completion");
933                                 return -EIO;
934                         }
935                 } while (buf & OTP_STATUS_BUSY_);
936         }
937
938         return 0;
939 }
940
941 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
942                             u32 length, u8 *data)
943 {
944         u8 sig;
945         int ret;
946
947         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
948
949         if (ret == 0) {
950                 /* an OTP_INDICATOR_2 image starts at offset 0x100 */
951                 if (sig == OTP_INDICATOR_2)
952                         offset += 0x100;
953                 else if (sig != OTP_INDICATOR_1)
954                         ret = -EINVAL;
955
956                 if (!ret)
957                         ret = lan78xx_read_raw_otp(dev, offset, length, data);
958         }
959
960         return ret;
961 }
962
963 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
964 {
965         int i, ret;
966
967         for (i = 0; i < 100; i++) {
968                 u32 dp_sel;
969
970                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
971                 if (unlikely(ret < 0))
972                         return -EIO;
973
974                 if (dp_sel & DP_SEL_DPRDY_)
975                         return 0;
976
977                 usleep_range(40, 100);
978         }
979
980         netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
981
982         return -EIO;
983 }
984
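/* Write a block of 32-bit words into the selected internal RAM through the
 * dataport: select the RAM bank in DP_SEL, then for each word program
 * DP_ADDR, DP_DATA and a WRITE command, waiting for DP_SEL_DPRDY_ between
 * operations.
 */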
985 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
986                                   u32 addr, u32 length, u32 *buf)
987 {
988         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
989         u32 dp_sel;
990         int i, ret;
991
992         if (usb_autopm_get_interface(dev->intf) < 0)
993                         return 0;
994
995         mutex_lock(&pdata->dataport_mutex);
996
997         ret = lan78xx_dataport_wait_not_busy(dev);
998         if (ret < 0)
999                 goto done;
1000
1001         ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1002
1003         dp_sel &= ~DP_SEL_RSEL_MASK_;
1004         dp_sel |= ram_select;
1005         ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
1006
1007         for (i = 0; i < length; i++) {
1008                 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
1009
1010                 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
1011
1012                 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1013
1014                 ret = lan78xx_dataport_wait_not_busy(dev);
1015                 if (ret < 0)
1016                         goto done;
1017         }
1018
1019 done:
1020         mutex_unlock(&pdata->dataport_mutex);
1021         usb_autopm_put_interface(dev->intf);
1022
1023         return ret;
1024 }
1025
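/* Pack a MAC address into a perfect-filter entry: word [1] (written to
 * MAF_LO) carries octets 0-3, word [0] (written to MAF_HI) carries octets
 * 4-5 plus the VALID and TYPE_DST flags.
 */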
1026 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1027                                     int index, u8 addr[ETH_ALEN])
1028 {
1029         u32     temp;
1030
1031         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1032                 temp = addr[3];
1033                 temp = addr[2] | (temp << 8);
1034                 temp = addr[1] | (temp << 8);
1035                 temp = addr[0] | (temp << 8);
1036                 pdata->pfilter_table[index][1] = temp;
1037                 temp = addr[5];
1038                 temp = addr[4] | (temp << 8);
1039                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1040                 pdata->pfilter_table[index][0] = temp;
1041         }
1042 }
1043
1044 /* returns hash bit number for given MAC address */
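/* The upper 9 bits of the Ethernet CRC give an index in the 0-511 range,
 * i.e. one bit position in the multicast hash filter table.
 */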
1045 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1046 {
1047         return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1048 }
1049
1050 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1051 {
1052         struct lan78xx_priv *pdata =
1053                         container_of(param, struct lan78xx_priv, set_multicast);
1054         struct lan78xx_net *dev = pdata->dev;
1055         int i;
1056         int ret;
1057
1058         netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1059                   pdata->rfe_ctl);
1060
1061         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1062                                DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1063
1064         for (i = 1; i < NUM_OF_MAF; i++) {
1065                 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1066                 ret = lan78xx_write_reg(dev, MAF_LO(i),
1067                                         pdata->pfilter_table[i][1]);
1068                 ret = lan78xx_write_reg(dev, MAF_HI(i),
1069                                         pdata->pfilter_table[i][0]);
1070         }
1071
1072         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1073 }
1074
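/* ndo_set_rx_mode callback. The first 32 multicast addresses are placed in
 * the perfect address filters (entries 1..32; entry 0 keeps the interface's
 * own MAC); anything beyond that falls back to the multicast hash table.
 * This callback runs in atomic context, so the actual USB register writes
 * are deferred to the set_multicast work item.
 */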
1075 static void lan78xx_set_multicast(struct net_device *netdev)
1076 {
1077         struct lan78xx_net *dev = netdev_priv(netdev);
1078         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1079         unsigned long flags;
1080         int i;
1081
1082         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1083
1084         pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1085                             RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1086
1087         for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1088                 pdata->mchash_table[i] = 0;
1089         /* pfilter_table[0] holds the device's own HW address */
1090         for (i = 1; i < NUM_OF_MAF; i++) {
1091                 pdata->pfilter_table[i][0] =
1092                 pdata->pfilter_table[i][1] = 0;
1093         }
1094
1095         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1096
1097         if (dev->net->flags & IFF_PROMISC) {
1098                 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1099                 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1100         } else {
1101                 if (dev->net->flags & IFF_ALLMULTI) {
1102                         netif_dbg(dev, drv, dev->net,
1103                                   "receive all multicast enabled");
1104                         pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1105                 }
1106         }
1107
1108         if (netdev_mc_count(dev->net)) {
1109                 struct netdev_hw_addr *ha;
1110                 int i;
1111
1112                 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1113
1114                 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1115
1116                 i = 1;
1117                 netdev_for_each_mc_addr(ha, netdev) {
1118                         /* set first 32 into Perfect Filter */
1119                         if (i < 33) {
1120                                 lan78xx_set_addr_filter(pdata, i, ha->addr);
1121                         } else {
1122                                 u32 bitnum = lan78xx_hash(ha->addr);
1123
1124                                 pdata->mchash_table[bitnum / 32] |=
1125                                                         (1 << (bitnum % 32));
1126                                 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1127                         }
1128                         i++;
1129                 }
1130         }
1131
1132         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1133
1134         /* defer register writes to a sleepable context */
1135         schedule_work(&pdata->set_multicast);
1136 }
1137
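/* Program MAC flow control. The pause capabilities come either from the
 * autoneg result (lcladv/rmtadv) or from the manually requested settings,
 * and the speed-dependent FIFO thresholds in FCT_FLOW are written before
 * TX/RX pause is enabled in FLOW.
 */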
1138 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1139                                       u16 lcladv, u16 rmtadv)
1140 {
1141         u32 flow = 0, fct_flow = 0;
1142         int ret;
1143         u8 cap;
1144
1145         if (dev->fc_autoneg)
1146                 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1147         else
1148                 cap = dev->fc_request_control;
1149
1150         if (cap & FLOW_CTRL_TX)
1151                 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1152
1153         if (cap & FLOW_CTRL_RX)
1154                 flow |= FLOW_CR_RX_FCEN_;
1155
1156         if (dev->udev->speed == USB_SPEED_SUPER)
1157                 fct_flow = 0x817;
1158         else if (dev->udev->speed == USB_SPEED_HIGH)
1159                 fct_flow = 0x211;
1160
1161         netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1162                   (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1163                   (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1164
1165         ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1166
1167         /* threshold value should be set before enabling flow */
1168         ret = lan78xx_write_reg(dev, FLOW, flow);
1169
1170         return 0;
1171 }
1172
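/* Handle a PHY link change signalled through the interrupt endpoint. On link
 * down the MAC is reset and the statistics timer stopped; on link up the
 * USB3 U1/U2 link power states are configured according to the negotiated
 * speed, flow control is re-resolved from the advertisements, and the
 * statistics timer is restarted.
 */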
1173 static int lan78xx_link_reset(struct lan78xx_net *dev)
1174 {
1175         struct phy_device *phydev = dev->net->phydev;
1176         struct ethtool_link_ksettings ecmd;
1177         int ladv, radv, ret;
1178         u32 buf;
1179
1180         /* clear LAN78xx interrupt status */
1181         ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1182         if (unlikely(ret < 0))
1183                 return -EIO;
1184
1185         phy_read_status(phydev);
1186
1187         if (!phydev->link && dev->link_on) {
1188                 dev->link_on = false;
1189
1190                 /* reset MAC */
1191                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1192                 if (unlikely(ret < 0))
1193                         return -EIO;
1194                 buf |= MAC_CR_RST_;
1195                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1196                 if (unlikely(ret < 0))
1197                         return -EIO;
1198
1199                 del_timer(&dev->stat_monitor);
1200         } else if (phydev->link && !dev->link_on) {
1201                 dev->link_on = true;
1202
1203                 phy_ethtool_ksettings_get(phydev, &ecmd);
1204
1205                 if (dev->udev->speed == USB_SPEED_SUPER) {
1206                         if (ecmd.base.speed == 1000) {
1207                                 /* disable U2 */
1208                                 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1209                                 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1210                                 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1211                                 /* enable U1 */
1212                                 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1213                                 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1214                                 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1215                         } else {
1216                                 /* enable U1 & U2 */
1217                                 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1218                                 buf |= USB_CFG1_DEV_U2_INIT_EN_;
1219                                 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1220                                 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1221                         }
1222                 }
1223
1224                 ladv = phy_read(phydev, MII_ADVERTISE);
1225                 if (ladv < 0)
1226                         return ladv;
1227
1228                 radv = phy_read(phydev, MII_LPA);
1229                 if (radv < 0)
1230                         return radv;
1231
1232                 netif_dbg(dev, link, dev->net,
1233                           "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1234                           ecmd.base.speed, ecmd.base.duplex, ladv, radv);
1235
1236                 ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1237                                                  radv);
1238
1239                 if (!timer_pending(&dev->stat_monitor)) {
1240                         dev->delta = 1;
1241                         mod_timer(&dev->stat_monitor,
1242                                   jiffies + STAT_UPDATE_TIMER);
1243                 }
1244         }
1245
1246         return ret;
1247 }
1248
1249 /* some work can't be done in tasklets, so we use keventd
1250  *
1251  * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1252  * but tasklet_schedule() doesn't.  Hope the failure is rare.
1253  */
1254 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1255 {
1256         set_bit(work, &dev->flags);
1257         if (!schedule_delayed_work(&dev->wq, 0))
1258                 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1259 }
1260
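/* Interrupt endpoint completion: the URB carries a 4-byte little-endian
 * bitmap of INT_EP_* sources. Only the PHY interrupt is handled here; it
 * schedules a deferred link reset and, if a PHY interrupt mapping exists,
 * dispatches the corresponding IRQ.
 */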
1261 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1262 {
1263         u32 intdata;
1264
1265         if (urb->actual_length != 4) {
1266                 netdev_warn(dev->net,
1267                             "unexpected urb length %d", urb->actual_length);
1268                 return;
1269         }
1270
1271         memcpy(&intdata, urb->transfer_buffer, 4);
1272         le32_to_cpus(&intdata);
1273
1274         if (intdata & INT_ENP_PHY_INT) {
1275                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1276                 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1277
1278                 if (dev->domain_data.phyirq > 0)
1279                         generic_handle_irq(dev->domain_data.phyirq);
1280         } else
1281                 netdev_warn(dev->net,
1282                             "unexpected interrupt: 0x%08x\n", intdata);
1283 }
1284
1285 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1286 {
1287         return MAX_EEPROM_SIZE;
1288 }
1289
1290 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1291                                       struct ethtool_eeprom *ee, u8 *data)
1292 {
1293         struct lan78xx_net *dev = netdev_priv(netdev);
1294         int ret;
1295
1296         ret = usb_autopm_get_interface(dev->intf);
1297         if (ret)
1298                 return ret;
1299
1300         ee->magic = LAN78XX_EEPROM_MAGIC;
1301
1302         ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1303
1304         usb_autopm_put_interface(dev->intf);
1305
1306         return ret;
1307 }
1308
1309 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1310                                       struct ethtool_eeprom *ee, u8 *data)
1311 {
1312         struct lan78xx_net *dev = netdev_priv(netdev);
1313         int ret;
1314
1315         ret = usb_autopm_get_interface(dev->intf);
1316         if (ret)
1317                 return ret;
1318
1319         /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1320          * to load data from EEPROM
1321          */
1322         if (ee->magic == LAN78XX_EEPROM_MAGIC)
1323                 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1324         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1325                  (ee->offset == 0) &&
1326                  (ee->len == 512) &&
1327                  (data[0] == OTP_INDICATOR_1))
1328                 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1329
1330         usb_autopm_put_interface(dev->intf);
1331
1332         return ret;
1333 }
1334
1335 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1336                                 u8 *data)
1337 {
1338         if (stringset == ETH_SS_STATS)
1339                 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1340 }
1341
1342 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1343 {
1344         if (sset == ETH_SS_STATS)
1345                 return ARRAY_SIZE(lan78xx_gstrings);
1346         else
1347                 return -EOPNOTSUPP;
1348 }
1349
1350 static void lan78xx_get_stats(struct net_device *netdev,
1351                               struct ethtool_stats *stats, u64 *data)
1352 {
1353         struct lan78xx_net *dev = netdev_priv(netdev);
1354
1355         lan78xx_update_stats(dev);
1356
1357         mutex_lock(&dev->stats.access_lock);
1358         memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1359         mutex_unlock(&dev->stats.access_lock);
1360 }
1361
1362 static void lan78xx_get_wol(struct net_device *netdev,
1363                             struct ethtool_wolinfo *wol)
1364 {
1365         struct lan78xx_net *dev = netdev_priv(netdev);
1366         int ret;
1367         u32 buf;
1368         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1369
1370         if (usb_autopm_get_interface(dev->intf) < 0)
1371                 return;
1372
1373         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1374         if (unlikely(ret < 0)) {
1375                 wol->supported = 0;
1376                 wol->wolopts = 0;
1377         } else {
1378                 if (buf & USB_CFG_RMT_WKP_) {
1379                         wol->supported = WAKE_ALL;
1380                         wol->wolopts = pdata->wol;
1381                 } else {
1382                         wol->supported = 0;
1383                         wol->wolopts = 0;
1384                 }
1385         }
1386
1387         usb_autopm_put_interface(dev->intf);
1388 }
1389
1390 static int lan78xx_set_wol(struct net_device *netdev,
1391                            struct ethtool_wolinfo *wol)
1392 {
1393         struct lan78xx_net *dev = netdev_priv(netdev);
1394         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1395         int ret;
1396
1397         ret = usb_autopm_get_interface(dev->intf);
1398         if (ret < 0)
1399                 return ret;
1400
1401         pdata->wol = 0;
1402         if (wol->wolopts & WAKE_UCAST)
1403                 pdata->wol |= WAKE_UCAST;
1404         if (wol->wolopts & WAKE_MCAST)
1405                 pdata->wol |= WAKE_MCAST;
1406         if (wol->wolopts & WAKE_BCAST)
1407                 pdata->wol |= WAKE_BCAST;
1408         if (wol->wolopts & WAKE_MAGIC)
1409                 pdata->wol |= WAKE_MAGIC;
1410         if (wol->wolopts & WAKE_PHY)
1411                 pdata->wol |= WAKE_PHY;
1412         if (wol->wolopts & WAKE_ARP)
1413                 pdata->wol |= WAKE_ARP;
1414
1415         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1416
1417         phy_ethtool_set_wol(netdev->phydev, wol);
1418
1419         usb_autopm_put_interface(dev->intf);
1420
1421         return ret;
1422 }
1423
1424 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1425 {
1426         struct lan78xx_net *dev = netdev_priv(net);
1427         struct phy_device *phydev = net->phydev;
1428         int ret;
1429         u32 buf;
1430
1431         ret = usb_autopm_get_interface(dev->intf);
1432         if (ret < 0)
1433                 return ret;
1434
1435         ret = phy_ethtool_get_eee(phydev, edata);
1436         if (ret < 0)
1437                 goto exit;
1438
1439         ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1440         if (buf & MAC_CR_EEE_EN_) {
1441                 edata->eee_enabled = true;
1442                 edata->eee_active = !!(edata->advertised &
1443                                        edata->lp_advertised);
1444                 edata->tx_lpi_enabled = true;
1445                 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer use the same usec unit */
1446                 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1447                 edata->tx_lpi_timer = buf;
1448         } else {
1449                 edata->eee_enabled = false;
1450                 edata->eee_active = false;
1451                 edata->tx_lpi_enabled = false;
1452                 edata->tx_lpi_timer = 0;
1453         }
1454
1455         ret = 0;
1456 exit:
1457         usb_autopm_put_interface(dev->intf);
1458
1459         return ret;
1460 }
1461
1462 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1463 {
1464         struct lan78xx_net *dev = netdev_priv(net);
1465         int ret;
1466         u32 buf;
1467
1468         ret = usb_autopm_get_interface(dev->intf);
1469         if (ret < 0)
1470                 return ret;
1471
1472         if (edata->eee_enabled) {
1473                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1474                 buf |= MAC_CR_EEE_EN_;
1475                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1476
1477                 phy_ethtool_set_eee(net->phydev, edata);
1478
1479                 buf = (u32)edata->tx_lpi_timer;
1480                 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1481         } else {
1482                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1483                 buf &= ~MAC_CR_EEE_EN_;
1484                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1485         }
1486
1487         usb_autopm_put_interface(dev->intf);
1488
1489         return 0;
1490 }
1491
1492 static u32 lan78xx_get_link(struct net_device *net)
1493 {
1494         phy_read_status(net->phydev);
1495
1496         return net->phydev->link;
1497 }
1498
1499 static void lan78xx_get_drvinfo(struct net_device *net,
1500                                 struct ethtool_drvinfo *info)
1501 {
1502         struct lan78xx_net *dev = netdev_priv(net);
1503
1504         strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1505         usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1506 }
1507
1508 static u32 lan78xx_get_msglevel(struct net_device *net)
1509 {
1510         struct lan78xx_net *dev = netdev_priv(net);
1511
1512         return dev->msg_enable;
1513 }
1514
1515 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1516 {
1517         struct lan78xx_net *dev = netdev_priv(net);
1518
1519         dev->msg_enable = level;
1520 }
1521
1522 static int lan78xx_get_link_ksettings(struct net_device *net,
1523                                       struct ethtool_link_ksettings *cmd)
1524 {
1525         struct lan78xx_net *dev = netdev_priv(net);
1526         struct phy_device *phydev = net->phydev;
1527         int ret;
1528
1529         ret = usb_autopm_get_interface(dev->intf);
1530         if (ret < 0)
1531                 return ret;
1532
1533         phy_ethtool_ksettings_get(phydev, cmd);
1534
1535         usb_autopm_put_interface(dev->intf);
1536
1537         return ret;
1538 }
1539
1540 static int lan78xx_set_link_ksettings(struct net_device *net,
1541                                       const struct ethtool_link_ksettings *cmd)
1542 {
1543         struct lan78xx_net *dev = netdev_priv(net);
1544         struct phy_device *phydev = net->phydev;
1545         int ret = 0;
1546         int temp;
1547
1548         ret = usb_autopm_get_interface(dev->intf);
1549         if (ret < 0)
1550                 return ret;
1551
1552         /* change speed & duplex */
1553         ret = phy_ethtool_ksettings_set(phydev, cmd);
1554
1555         if (!cmd->base.autoneg) {
1556                 /* force link down */
1557                 temp = phy_read(phydev, MII_BMCR);
1558                 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1559                 mdelay(1);
1560                 phy_write(phydev, MII_BMCR, temp);
1561         }
1562
1563         usb_autopm_put_interface(dev->intf);
1564
1565         return ret;
1566 }
1567
1568 static void lan78xx_get_pause(struct net_device *net,
1569                               struct ethtool_pauseparam *pause)
1570 {
1571         struct lan78xx_net *dev = netdev_priv(net);
1572         struct phy_device *phydev = net->phydev;
1573         struct ethtool_link_ksettings ecmd;
1574
1575         phy_ethtool_ksettings_get(phydev, &ecmd);
1576
1577         pause->autoneg = dev->fc_autoneg;
1578
1579         if (dev->fc_request_control & FLOW_CTRL_TX)
1580                 pause->tx_pause = 1;
1581
1582         if (dev->fc_request_control & FLOW_CTRL_RX)
1583                 pause->rx_pause = 1;
1584 }
1585
1586 static int lan78xx_set_pause(struct net_device *net,
1587                              struct ethtool_pauseparam *pause)
1588 {
1589         struct lan78xx_net *dev = netdev_priv(net);
1590         struct phy_device *phydev = net->phydev;
1591         struct ethtool_link_ksettings ecmd;
1592         int ret;
1593
1594         phy_ethtool_ksettings_get(phydev, &ecmd);
1595
1596         if (pause->autoneg && !ecmd.base.autoneg) {
1597                 ret = -EINVAL;
1598                 goto exit;
1599         }
1600
1601         dev->fc_request_control = 0;
1602         if (pause->rx_pause)
1603                 dev->fc_request_control |= FLOW_CTRL_RX;
1604
1605         if (pause->tx_pause)
1606                 dev->fc_request_control |= FLOW_CTRL_TX;
1607
1608         if (ecmd.base.autoneg) {
1609                 u32 mii_adv;
1610                 u32 advertising;
1611
1612                 ethtool_convert_link_mode_to_legacy_u32(
1613                         &advertising, ecmd.link_modes.advertising);
1614
1615                 advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1616                 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1617                 advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1618
1619                 ethtool_convert_legacy_u32_to_link_mode(
1620                         ecmd.link_modes.advertising, advertising);
1621
1622                 phy_ethtool_ksettings_set(phydev, &ecmd);
1623         }
1624
1625         dev->fc_autoneg = pause->autoneg;
1626
1627         ret = 0;
1628 exit:
1629         return ret;
1630 }
1631
1632 static int lan78xx_get_regs_len(struct net_device *netdev)
1633 {
1634         if (!netdev->phydev)
1635                 return (sizeof(lan78xx_regs));
1636         else
1637                 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1638 }
1639
1640 static void
1641 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1642                  void *buf)
1643 {
1644         u32 *data = buf;
1645         int i, j;
1646         struct lan78xx_net *dev = netdev_priv(netdev);
1647
1648         /* Read Device/MAC registers */
1649         for (i = 0; i < (sizeof(lan78xx_regs) / sizeof(u32)); i++)
1650                 lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1651
1652         if (!netdev->phydev)
1653                 return;
1654
1655         /* Read PHY registers */
1656         for (j = 0; j < 32; i++, j++)
1657                 data[i] = phy_read(netdev->phydev, j);
1658 }
1659
1660 static const struct ethtool_ops lan78xx_ethtool_ops = {
1661         .get_link       = lan78xx_get_link,
1662         .nway_reset     = phy_ethtool_nway_reset,
1663         .get_drvinfo    = lan78xx_get_drvinfo,
1664         .get_msglevel   = lan78xx_get_msglevel,
1665         .set_msglevel   = lan78xx_set_msglevel,
1666         .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1667         .get_eeprom     = lan78xx_ethtool_get_eeprom,
1668         .set_eeprom     = lan78xx_ethtool_set_eeprom,
1669         .get_ethtool_stats = lan78xx_get_stats,
1670         .get_sset_count = lan78xx_get_sset_count,
1671         .get_strings    = lan78xx_get_strings,
1672         .get_wol        = lan78xx_get_wol,
1673         .set_wol        = lan78xx_set_wol,
1674         .get_eee        = lan78xx_get_eee,
1675         .set_eee        = lan78xx_set_eee,
1676         .get_pauseparam = lan78xx_get_pause,
1677         .set_pauseparam = lan78xx_set_pause,
1678         .get_link_ksettings = lan78xx_get_link_ksettings,
1679         .set_link_ksettings = lan78xx_set_link_ksettings,
1680         .get_regs_len   = lan78xx_get_regs_len,
1681         .get_regs       = lan78xx_get_regs,
1682 };
1683
1684 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1685 {
1686         if (!netif_running(netdev))
1687                 return -EINVAL;
1688
1689         return phy_mii_ioctl(netdev->phydev, rq, cmd);
1690 }
1691
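/* Choose the MAC address in order of preference: the value already programmed
 * in RX_ADDRL/RX_ADDRH, a platform/Device Tree provided address, EEPROM/OTP,
 * and finally a random address. The result is also written to perfect filter
 * slot 0 (MAF_LO(0)/MAF_HI(0)).
 */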
1692 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1693 {
1694         u32 addr_lo, addr_hi;
1695         int ret;
1696         u8 addr[6];
1697
1698         ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1699         ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1700
1701         addr[0] = addr_lo & 0xFF;
1702         addr[1] = (addr_lo >> 8) & 0xFF;
1703         addr[2] = (addr_lo >> 16) & 0xFF;
1704         addr[3] = (addr_lo >> 24) & 0xFF;
1705         addr[4] = addr_hi & 0xFF;
1706         addr[5] = (addr_hi >> 8) & 0xFF;
1707
1708         if (!is_valid_ether_addr(addr)) {
1709                 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1710                         /* valid address present in Device Tree */
1711                         netif_dbg(dev, ifup, dev->net,
1712                                   "MAC address read from Device Tree");
1713                 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1714                                                  ETH_ALEN, addr) == 0) ||
1715                             (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1716                                               ETH_ALEN, addr) == 0)) &&
1717                            is_valid_ether_addr(addr)) {
1718                         /* eeprom values are valid so use them */
1719                         netif_dbg(dev, ifup, dev->net,
1720                                   "MAC address read from EEPROM");
1721                 } else {
1722                         /* generate random MAC */
1723                         random_ether_addr(addr);
1724                         netif_dbg(dev, ifup, dev->net,
1725                                   "MAC address set to random addr");
1726                 }
1727
1728                 addr_lo = addr[0] | (addr[1] << 8) |
1729                           (addr[2] << 16) | (addr[3] << 24);
1730                 addr_hi = addr[4] | (addr[5] << 8);
1731
1732                 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1733                 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1734         }
1735
1736         ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1737         ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1738
1739         ether_addr_copy(dev->net->dev_addr, addr);
1740 }
1741
1742 /* MDIO read and write wrappers for phylib */
1743 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1744 {
1745         struct lan78xx_net *dev = bus->priv;
1746         u32 val, addr;
1747         int ret;
1748
1749         ret = usb_autopm_get_interface(dev->intf);
1750         if (ret < 0)
1751                 return ret;
1752
1753         mutex_lock(&dev->phy_mutex);
1754
1755         /* confirm MII not busy */
1756         ret = lan78xx_phy_wait_not_busy(dev);
1757         if (ret < 0)
1758                 goto done;
1759
1760         /* set the address, index & direction (read from PHY) */
1761         addr = mii_access(phy_id, idx, MII_READ);
1762         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1763
1764         ret = lan78xx_phy_wait_not_busy(dev);
1765         if (ret < 0)
1766                 goto done;
1767
1768         ret = lan78xx_read_reg(dev, MII_DATA, &val);
1769
1770         ret = (int)(val & 0xFFFF);
1771
1772 done:
1773         mutex_unlock(&dev->phy_mutex);
1774         usb_autopm_put_interface(dev->intf);
1775
1776         return ret;
1777 }
1778
1779 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1780                                  u16 regval)
1781 {
1782         struct lan78xx_net *dev = bus->priv;
1783         u32 val, addr;
1784         int ret;
1785
1786         ret = usb_autopm_get_interface(dev->intf);
1787         if (ret < 0)
1788                 return ret;
1789
1790         mutex_lock(&dev->phy_mutex);
1791
1792         /* confirm MII not busy */
1793         ret = lan78xx_phy_wait_not_busy(dev);
1794         if (ret < 0)
1795                 goto done;
1796
1797         val = (u32)regval;
1798         ret = lan78xx_write_reg(dev, MII_DATA, val);
1799
1800         /* set the address, index & direction (write to PHY) */
1801         addr = mii_access(phy_id, idx, MII_WRITE);
1802         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1803
1804         ret = lan78xx_phy_wait_not_busy(dev);
1805         if (ret < 0)
1806                 goto done;
1807
1808 done:
1809         mutex_unlock(&dev->phy_mutex);
1810         usb_autopm_put_interface(dev->intf);
1811         return 0;
1812 }
1813
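/* Allocate and register the MDIO bus that wraps the chip's MII_ACC/MII_DATA
 * register interface; the PHY address mask depends on the chip ID.
 */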
1814 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1815 {
1816         struct device_node *node;
1817         int ret;
1818
1819         dev->mdiobus = mdiobus_alloc();
1820         if (!dev->mdiobus) {
1821                 netdev_err(dev->net, "can't allocate MDIO bus\n");
1822                 return -ENOMEM;
1823         }
1824
1825         dev->mdiobus->priv = (void *)dev;
1826         dev->mdiobus->read = lan78xx_mdiobus_read;
1827         dev->mdiobus->write = lan78xx_mdiobus_write;
1828         dev->mdiobus->name = "lan78xx-mdiobus";
1829
1830         snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1831                  dev->udev->bus->busnum, dev->udev->devnum);
1832
1833         switch (dev->chipid) {
1834         case ID_REV_CHIP_ID_7800_:
1835         case ID_REV_CHIP_ID_7850_:
1836                 /* set to internal PHY id */
1837                 dev->mdiobus->phy_mask = ~(1 << 1);
1838                 break;
1839         case ID_REV_CHIP_ID_7801_:
1840                 /* scan through PHYAD[2..0] */
1841                 dev->mdiobus->phy_mask = ~(0xFF);
1842                 break;
1843         }
1844
1845         node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
1846         ret = of_mdiobus_register(dev->mdiobus, node);
1847         if (node)
1848                 of_node_put(node);
1849         if (ret) {
1850                 netdev_err(dev->net, "can't register MDIO bus\n");
1851                 goto exit1;
1852         }
1853
1854         netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1855         return 0;
1856 exit1:
1857         mdiobus_free(dev->mdiobus);
1858         return ret;
1859 }
1860
1861 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1862 {
1863         mdiobus_unregister(dev->mdiobus);
1864         mdiobus_free(dev->mdiobus);
1865 }
1866
1867 static void lan78xx_link_status_change(struct net_device *net)
1868 {
1869         struct phy_device *phydev = net->phydev;
1870         int ret, temp;
1871
1872         /* In forced 100 Full/Half mode the chip may fail to set the mode
1873          * correctly when the cable is switched between a long (~50 m or more)
1874          * and a short one. As a workaround, set the speed to 10 before
1875          * setting it to 100 while in forced 100 F/H mode.
1876          */
1877         if (!phydev->autoneg && (phydev->speed == 100)) {
1878                 /* disable phy interrupt */
1879                 temp = phy_read(phydev, LAN88XX_INT_MASK);
1880                 temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1881                 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1882
1883                 temp = phy_read(phydev, MII_BMCR);
1884                 temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1885                 phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1886                 temp |= BMCR_SPEED100;
1887                 phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1888
1889                 /* clear any pending interrupt generated during the workaround */
1890                 temp = phy_read(phydev, LAN88XX_INT_STS);
1891
1892                 /* re-enable the phy interrupt */
1893                 temp = phy_read(phydev, LAN88XX_INT_MASK);
1894                 temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1895                 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1896         }
1897 }
1898
1899 static int irq_map(struct irq_domain *d, unsigned int irq,
1900                    irq_hw_number_t hwirq)
1901 {
1902         struct irq_domain_data *data = d->host_data;
1903
1904         irq_set_chip_data(irq, data);
1905         irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1906         irq_set_noprobe(irq);
1907
1908         return 0;
1909 }
1910
1911 static void irq_unmap(struct irq_domain *d, unsigned int irq)
1912 {
1913         irq_set_chip_and_handler(irq, NULL, NULL);
1914         irq_set_chip_data(irq, NULL);
1915 }
1916
1917 static const struct irq_domain_ops chip_domain_ops = {
1918         .map    = irq_map,
1919         .unmap  = irq_unmap,
1920 };
1921
1922 static void lan78xx_irq_mask(struct irq_data *irqd)
1923 {
1924         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1925
1926         data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1927 }
1928
1929 static void lan78xx_irq_unmask(struct irq_data *irqd)
1930 {
1931         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1932
1933         data->irqenable |= BIT(irqd_to_hwirq(irqd));
1934 }
1935
1936 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1937 {
1938         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1939
1940         mutex_lock(&data->irq_lock);
1941 }
1942
1943 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1944 {
1945         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1946         struct lan78xx_net *dev =
1947                         container_of(data, struct lan78xx_net, domain_data);
1948         u32 buf;
1949         int ret;
1950
1951         /* do the register access here because irq_bus_lock & irq_bus_sync_unlock
1952          * are the only two callbacks executed in a non-atomic context.
1953          */
1954         ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1955         if (buf != data->irqenable)
1956                 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1957
1958         mutex_unlock(&data->irq_lock);
1959 }
1960
1961 static struct irq_chip lan78xx_irqchip = {
1962         .name                   = "lan78xx-irqs",
1963         .irq_mask               = lan78xx_irq_mask,
1964         .irq_unmask             = lan78xx_irq_unmask,
1965         .irq_bus_lock           = lan78xx_irq_bus_lock,
1966         .irq_bus_sync_unlock    = lan78xx_irq_bus_sync_unlock,
1967 };
1968
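/* Create a simple IRQ domain backed by the INT_EP_CTL register so the PHY
 * interrupt (INT_EP_PHY) can be mapped and requested like any other IRQ.
 */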
1969 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1970 {
1971         struct device_node *of_node;
1972         struct irq_domain *irqdomain;
1973         unsigned int irqmap = 0;
1974         u32 buf;
1975         int ret = 0;
1976
1977         of_node = dev->udev->dev.parent->of_node;
1978
1979         mutex_init(&dev->domain_data.irq_lock);
1980
1981         lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1982         dev->domain_data.irqenable = buf;
1983
1984         dev->domain_data.irqchip = &lan78xx_irqchip;
1985         dev->domain_data.irq_handler = handle_simple_irq;
1986
1987         irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1988                                           &chip_domain_ops, &dev->domain_data);
1989         if (irqdomain) {
1990                 /* create mapping for PHY interrupt */
1991                 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1992                 if (!irqmap) {
1993                         irq_domain_remove(irqdomain);
1994
1995                         irqdomain = NULL;
1996                         ret = -EINVAL;
1997                 }
1998         } else {
1999                 ret = -EINVAL;
2000         }
2001
2002         dev->domain_data.irqdomain = irqdomain;
2003         dev->domain_data.phyirq = irqmap;
2004
2005         return ret;
2006 }
2007
2008 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2009 {
2010         if (dev->domain_data.phyirq > 0) {
2011                 irq_dispose_mapping(dev->domain_data.phyirq);
2012
2013                 if (dev->domain_data.irqdomain)
2014                         irq_domain_remove(dev->domain_data.irqdomain);
2015         }
2016         dev->domain_data.phyirq = 0;
2017         dev->domain_data.irqdomain = NULL;
2018 }
2019
2020 static int lan8835_fixup(struct phy_device *phydev)
2021 {
2022         int buf;
2023         int ret;
2024         struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2025
2026         /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2027         buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2028         buf &= ~0x1800;
2029         buf |= 0x0800;
2030         phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2031
2032         /* RGMII MAC TXC Delay Enable */
2033         ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2034                                 MAC_RGMII_ID_TXC_DELAY_EN_);
2035
2036         /* RGMII TX DLL Tune Adjust */
2037         ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2038
2039         dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2040
2041         return 1;
2042 }
2043
2044 static int ksz9031rnx_fixup(struct phy_device *phydev)
2045 {
2046         struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2047
2048         /* Micrel KSZ9031RNX PHY configuration */
2049         /* RGMII Control Signal Pad Skew */
2050         phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2051         /* RGMII RX Data Pad Skew */
2052         phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2053         /* RGMII RX Clock Pad Skew */
2054         phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2055
2056         dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2057
2058         return 1;
2059 }
2060
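/* LAN7801 PHY setup: use the first PHY found on the MDIO bus, or fall back to
 * a 1000/full fixed PHY (with MAC-side RGMII delays and reference clocks
 * enabled) when none is found; external PHYs get their RGMII fixups
 * registered here.
 */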
2061 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2062 {
2063         u32 buf;
2064         int ret;
2065         struct fixed_phy_status fphy_status = {
2066                 .link = 1,
2067                 .speed = SPEED_1000,
2068                 .duplex = DUPLEX_FULL,
2069         };
2070         struct phy_device *phydev;
2071
2072         phydev = phy_find_first(dev->mdiobus);
2073         if (!phydev) {
2074                 netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2075                 phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1,
2076                                             NULL);
2077                 if (IS_ERR(phydev)) {
2078                         netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2079                         return NULL;
2080                 }
2081                 netdev_dbg(dev->net, "Registered FIXED PHY\n");
2082                 dev->interface = PHY_INTERFACE_MODE_RGMII;
2083                 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2084                                         MAC_RGMII_ID_TXC_DELAY_EN_);
2085                 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2086                 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2087                 buf |= HW_CFG_CLK125_EN_;
2088                 buf |= HW_CFG_REFCLK25_EN_;
2089                 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2090         } else {
2091                 if (!phydev->drv) {
2092                         netdev_err(dev->net, "no PHY driver found\n");
2093                         return NULL;
2094                 }
2095                 dev->interface = PHY_INTERFACE_MODE_RGMII;
2096                 /* external PHY fixup for KSZ9031RNX */
2097                 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2098                                                  ksz9031rnx_fixup);
2099                 if (ret < 0) {
2100                         netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2101                         return NULL;
2102                 }
2103                 /* external PHY fixup for LAN8835 */
2104                 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2105                                                  lan8835_fixup);
2106                 if (ret < 0) {
2107                         netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2108                         return NULL;
2109                 }
2110                 /* add more external PHY fixup here if needed */
2111
2112                 phydev->is_internal = false;
2113         }
2114         return phydev;
2115 }
2116
2117 static int lan78xx_phy_init(struct lan78xx_net *dev)
2118 {
2119         int ret;
2120         u32 mii_adv;
2121         struct phy_device *phydev;
2122
2123         switch (dev->chipid) {
2124         case ID_REV_CHIP_ID_7801_:
2125                 phydev = lan7801_phy_init(dev);
2126                 if (!phydev) {
2127                         netdev_err(dev->net, "lan7801: PHY Init Failed");
2128                         return -EIO;
2129                 }
2130                 break;
2131
2132         case ID_REV_CHIP_ID_7800_:
2133         case ID_REV_CHIP_ID_7850_:
2134                 phydev = phy_find_first(dev->mdiobus);
2135                 if (!phydev) {
2136                         netdev_err(dev->net, "no PHY found\n");
2137                         return -EIO;
2138                 }
2139                 phydev->is_internal = true;
2140                 dev->interface = PHY_INTERFACE_MODE_GMII;
2141                 break;
2142
2143         default:
2144                 netdev_err(dev->net, "Unknown CHIP ID found\n");
2145                 return -EIO;
2146         }
2147
2148         /* if phyirq is not set, use polling mode in phylib */
2149         if (dev->domain_data.phyirq > 0)
2150                 phydev->irq = dev->domain_data.phyirq;
2151         else
2152                 phydev->irq = 0;
2153         netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2154
2155         /* set to AUTOMDIX */
2156         phydev->mdix = ETH_TP_MDI_AUTO;
2157
2158         ret = phy_connect_direct(dev->net, phydev,
2159                                  lan78xx_link_status_change,
2160                                  dev->interface);
2161         if (ret) {
2162                 netdev_err(dev->net, "can't attach PHY to %s\n",
2163                            dev->mdiobus->id);
2164                 if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2165                         if (phy_is_pseudo_fixed_link(phydev)) {
2166                                 fixed_phy_unregister(phydev);
2167                         } else {
2168                                 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2169                                                              0xfffffff0);
2170                                 phy_unregister_fixup_for_uid(PHY_LAN8835,
2171                                                              0xfffffff0);
2172                         }
2173                 }
2174                 return -EIO;
2175         }
2176
2177         /* MAC doesn't support 1000T Half */
2178         phydev->supported &= ~SUPPORTED_1000baseT_Half;
2179
2180         /* support both flow controls */
2181         dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2182         phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2183         mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2184         phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2185
2186         if (phydev->mdio.dev.of_node) {
2187                 u32 reg;
2188                 int len;
2189
2190                 len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2191                                                       "microchip,led-modes",
2192                                                       sizeof(u32));
2193                 if (len >= 0) {
2194                         /* Ensure the appropriate LEDs are enabled */
2195                         lan78xx_read_reg(dev, HW_CFG, &reg);
2196                         reg &= ~(HW_CFG_LED0_EN_ |
2197                                  HW_CFG_LED1_EN_ |
2198                                  HW_CFG_LED2_EN_ |
2199                                  HW_CFG_LED3_EN_);
2200                         reg |= (len > 0) * HW_CFG_LED0_EN_ |
2201                                 (len > 1) * HW_CFG_LED1_EN_ |
2202                                 (len > 2) * HW_CFG_LED2_EN_ |
2203                                 (len > 3) * HW_CFG_LED3_EN_;
2204                         lan78xx_write_reg(dev, HW_CFG, reg);
2205                 }
2206         }
2207
2208         genphy_config_aneg(phydev);
2209
2210         dev->fc_autoneg = phydev->autoneg;
2211
2212         return 0;
2213 }
2214
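/* Program the MAC RX maximum frame size (the FCS adds 4 bytes), temporarily
 * disabling the receiver if it is currently enabled.
 */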
2215 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2216 {
2217         int ret = 0;
2218         u32 buf;
2219         bool rxenabled;
2220
2221         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2222
2223         rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2224
2225         if (rxenabled) {
2226                 buf &= ~MAC_RX_RXEN_;
2227                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2228         }
2229
2230         /* add 4 to size for FCS */
2231         buf &= ~MAC_RX_MAX_SIZE_MASK_;
2232         buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2233
2234         ret = lan78xx_write_reg(dev, MAC_RX, buf);
2235
2236         if (rxenabled) {
2237                 buf |= MAC_RX_RXEN_;
2238                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2239         }
2240
2241         return 0;
2242 }
2243
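/* Unlink every URB still queued on @q and return how many unlinks were issued
 * successfully or are already in progress.
 */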
2244 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2245 {
2246         struct sk_buff *skb;
2247         unsigned long flags;
2248         int count = 0;
2249
2250         spin_lock_irqsave(&q->lock, flags);
2251         while (!skb_queue_empty(q)) {
2252                 struct skb_data *entry;
2253                 struct urb *urb;
2254                 int ret;
2255
2256                 skb_queue_walk(q, skb) {
2257                         entry = (struct skb_data *)skb->cb;
2258                         if (entry->state != unlink_start)
2259                                 goto found;
2260                 }
2261                 break;
2262 found:
2263                 entry->state = unlink_start;
2264                 urb = entry->urb;
2265
2266                 /* Take a reference on the URB so it cannot be freed while
2267                  * usb_unlink_urb runs; otherwise a use-after-free could be
2268                  * triggered inside usb_unlink_urb, since it always races
2269                  * with the .complete handler (including defer_bh).
2270                  */
2272                 usb_get_urb(urb);
2273                 spin_unlock_irqrestore(&q->lock, flags);
2274                 /* during some PM-driven resume scenarios,
2275                  * these (async) unlinks complete immediately
2276                  */
2277                 ret = usb_unlink_urb(urb);
2278                 if (ret != -EINPROGRESS && ret != 0)
2279                         netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2280                 else
2281                         count++;
2282                 usb_put_urb(urb);
2283                 spin_lock_irqsave(&q->lock, flags);
2284         }
2285         spin_unlock_irqrestore(&q->lock, flags);
2286         return count;
2287 }
2288
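/* ndo_change_mtu: refuse an MTU whose USB payload would be an exact multiple
 * of the bulk packet size (to avoid a trailing zero-length packet), update the
 * MAC maximum frame length and grow the RX URB size when needed.
 */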
2289 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2290 {
2291         struct lan78xx_net *dev = netdev_priv(netdev);
2292         int ll_mtu = new_mtu + netdev->hard_header_len;
2293         int old_hard_mtu = dev->hard_mtu;
2294         int old_rx_urb_size = dev->rx_urb_size;
2295         int ret;
2296
2297         /* no second zero-length packet read wanted after mtu-sized packets */
2298         if ((ll_mtu % dev->maxpacket) == 0)
2299                 return -EDOM;
2300
2301         ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
2302
2303         netdev->mtu = new_mtu;
2304
2305         dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2306         if (dev->rx_urb_size == old_hard_mtu) {
2307                 dev->rx_urb_size = dev->hard_mtu;
2308                 if (dev->rx_urb_size > old_rx_urb_size) {
2309                         if (netif_running(dev->net)) {
2310                                 unlink_urbs(dev, &dev->rxq);
2311                                 tasklet_schedule(&dev->bh);
2312                         }
2313                 }
2314         }
2315
2316         return 0;
2317 }
2318
2319 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2320 {
2321         struct lan78xx_net *dev = netdev_priv(netdev);
2322         struct sockaddr *addr = p;
2323         u32 addr_lo, addr_hi;
2324         int ret;
2325
2326         if (netif_running(netdev))
2327                 return -EBUSY;
2328
2329         if (!is_valid_ether_addr(addr->sa_data))
2330                 return -EADDRNOTAVAIL;
2331
2332         ether_addr_copy(netdev->dev_addr, addr->sa_data);
2333
2334         addr_lo = netdev->dev_addr[0] |
2335                   netdev->dev_addr[1] << 8 |
2336                   netdev->dev_addr[2] << 16 |
2337                   netdev->dev_addr[3] << 24;
2338         addr_hi = netdev->dev_addr[4] |
2339                   netdev->dev_addr[5] << 8;
2340
2341         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2342         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2343
2344         return 0;
2345 }
2346
2347 /* Enable or disable Rx checksum offload engine */
2348 static int lan78xx_set_features(struct net_device *netdev,
2349                                 netdev_features_t features)
2350 {
2351         struct lan78xx_net *dev = netdev_priv(netdev);
2352         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2353         unsigned long flags;
2354         int ret;
2355
2356         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2357
2358         if (features & NETIF_F_RXCSUM) {
2359                 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2360                 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2361         } else {
2362                 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2363                 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2364         }
2365
2366         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2367                 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2368         else
2369                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2370
2371         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2372
2373         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2374
2375         return 0;
2376 }
2377
2378 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2379 {
2380         struct lan78xx_priv *pdata =
2381                         container_of(param, struct lan78xx_priv, set_vlan);
2382         struct lan78xx_net *dev = pdata->dev;
2383
2384         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2385                                DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2386 }
2387
2388 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2389                                    __be16 proto, u16 vid)
2390 {
2391         struct lan78xx_net *dev = netdev_priv(netdev);
2392         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2393         u16 vid_bit_index;
2394         u16 vid_dword_index;
2395
2396         vid_dword_index = (vid >> 5) & 0x7F;
2397         vid_bit_index = vid & 0x1F;
2398
2399         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2400
2401         /* defer register writes to a sleepable context */
2402         schedule_work(&pdata->set_vlan);
2403
2404         return 0;
2405 }
2406
2407 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2408                                     __be16 proto, u16 vid)
2409 {
2410         struct lan78xx_net *dev = netdev_priv(netdev);
2411         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2412         u16 vid_bit_index;
2413         u16 vid_dword_index;
2414
2415         vid_dword_index = (vid >> 5) & 0x7F;
2416         vid_bit_index = vid & 0x1F;
2417
2418         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2419
2420         /* defer register writes to a sleepable context */
2421         schedule_work(&pdata->set_vlan);
2422
2423         return 0;
2424 }
2425
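/* Initialise the LTM (Latency Tolerance Messaging) registers: when LTM is
 * enabled in USB_CFG1 and a valid 24-byte table is found in EEPROM or OTP,
 * use it, otherwise program zeros.
 */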
2426 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2427 {
2428         int ret;
2429         u32 buf;
2430         u32 regs[6] = { 0 };
2431
2432         ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2433         if (buf & USB_CFG1_LTM_ENABLE_) {
2434                 u8 temp[2];
2435                 /* Get values from EEPROM first */
2436                 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2437                         if (temp[0] == 24) {
2438                                 ret = lan78xx_read_raw_eeprom(dev,
2439                                                               temp[1] * 2,
2440                                                               24,
2441                                                               (u8 *)regs);
2442                                 if (ret < 0)
2443                                         return;
2444                         }
2445                 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2446                         if (temp[0] == 24) {
2447                                 ret = lan78xx_read_raw_otp(dev,
2448                                                            temp[1] * 2,
2449                                                            24,
2450                                                            (u8 *)regs);
2451                                 if (ret < 0)
2452                                         return;
2453                         }
2454                 }
2455         }
2456
2457         lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2458         lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2459         lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2460         lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2461         lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2462         lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2463 }
2464
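/* Lite-reset the chip and reinitialise it: MAC address, USB burst/bulk-in
 * settings, FIFO sizes, flow control, receive filters, checksum offload and
 * the PHY, then re-enable the TX and RX paths.
 */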
2465 static int lan78xx_reset(struct lan78xx_net *dev)
2466 {
2467         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2468         u32 buf;
2469         int ret = 0;
2470         unsigned long timeout;
2471         u8 sig;
2472
2473         ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2474         buf |= HW_CFG_LRST_;
2475         ret = lan78xx_write_reg(dev, HW_CFG, buf);
2476
2477         timeout = jiffies + HZ;
2478         do {
2479                 mdelay(1);
2480                 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2481                 if (time_after(jiffies, timeout)) {
2482                         netdev_warn(dev->net,
2483                                     "timeout on completion of LiteReset");
2484                         return -EIO;
2485                 }
2486         } while (buf & HW_CFG_LRST_);
2487
2488         lan78xx_init_mac_address(dev);
2489
2490         /* save DEVID for later usage */
2491         ret = lan78xx_read_reg(dev, ID_REV, &buf);
2492         dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2493         dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2494
2495         /* Respond to the IN token with a NAK */
2496         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2497         buf |= USB_CFG_BIR_;
2498         ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2499
2500         /* Init LTM */
2501         lan78xx_init_ltm(dev);
2502
2503         if (dev->udev->speed == USB_SPEED_SUPER) {
2504                 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2505                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2506                 dev->rx_qlen = 4;
2507                 dev->tx_qlen = 4;
2508         } else if (dev->udev->speed == USB_SPEED_HIGH) {
2509                 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2510                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2511                 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2512                 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2513         } else {
2514                 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2515                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2516                 dev->rx_qlen = 4;
2517                 dev->tx_qlen = 4;
2518         }
2519
2520         ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2521         ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2522
2523         ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2524         buf |= HW_CFG_MEF_;
2525         ret = lan78xx_write_reg(dev, HW_CFG, buf);
2526
2527         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2528         buf |= USB_CFG_BCE_;
2529         ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2530
2531         /* set FIFO sizes */
2532         buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2533         ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2534
2535         buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2536         ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2537
2538         ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2539         ret = lan78xx_write_reg(dev, FLOW, 0);
2540         ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2541
2542         /* Don't need rfe_ctl_lock during initialisation */
2543         ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2544         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2545         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2546
2547         /* Enable or disable checksum offload engines */
2548         lan78xx_set_features(dev->net, dev->net->features);
2549
2550         lan78xx_set_multicast(dev->net);
2551
2552         /* reset PHY */
2553         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2554         buf |= PMT_CTL_PHY_RST_;
2555         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2556
2557         timeout = jiffies + HZ;
2558         do {
2559                 mdelay(1);
2560                 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2561                 if (time_after(jiffies, timeout)) {
2562                         netdev_warn(dev->net, "timeout waiting for PHY Reset");
2563                         return -EIO;
2564                 }
2565         } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2566
2567         ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2568         /* LAN7801 only has RGMII mode */
2569         if (dev->chipid == ID_REV_CHIP_ID_7801_)
2570                 buf &= ~MAC_CR_GMII_EN_;
2571
2572         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2573                 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2574                 if (!ret && sig != EEPROM_INDICATOR) {
2575                         /* Implies there is no external EEPROM; set MAC speed */
2576                         netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2577                         buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2578                 }
2579         }
2580         ret = lan78xx_write_reg(dev, MAC_CR, buf);
2581
2582         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2583         buf |= MAC_TX_TXEN_;
2584         ret = lan78xx_write_reg(dev, MAC_TX, buf);
2585
2586         ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2587         buf |= FCT_TX_CTL_EN_;
2588         ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2589
2590         ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
2591
2592         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2593         buf |= MAC_RX_RXEN_;
2594         ret = lan78xx_write_reg(dev, MAC_RX, buf);
2595
2596         ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2597         buf |= FCT_RX_CTL_EN_;
2598         ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2599
2600         return 0;
2601 }
2602
2603 static void lan78xx_init_stats(struct lan78xx_net *dev)
2604 {
2605         u32 *p;
2606         int i;
2607
2608         /* initialize rollover limits for the stats update:
2609          * some counters are 20 bits wide and some are 32 bits wide
2610          */
2611         p = (u32 *)&dev->stats.rollover_max;
2612         for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2613                 p[i] = 0xFFFFF;
2614
2615         dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2616         dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2617         dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2618         dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2619         dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2620         dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2621         dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2622         dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2623         dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2624         dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2625
2626         set_bit(EVENT_STAT_UPDATE, &dev->flags);
2627 }
2628
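/* ndo_open: resume the device, start the PHY and the interrupt URB, arm the
 * statistics bookkeeping and defer a link reset to the kevent worker.
 */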
2629 static int lan78xx_open(struct net_device *net)
2630 {
2631         struct lan78xx_net *dev = netdev_priv(net);
2632         int ret;
2633
2634         ret = usb_autopm_get_interface(dev->intf);
2635         if (ret < 0)
2636                 goto out;
2637
2638         phy_start(net->phydev);
2639
2640         netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2641
2642         /* for Link Check */
2643         if (dev->urb_intr) {
2644                 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2645                 if (ret < 0) {
2646                         netif_err(dev, ifup, dev->net,
2647                                   "intr submit %d\n", ret);
2648                         goto done;
2649                 }
2650         }
2651
2652         lan78xx_init_stats(dev);
2653
2654         set_bit(EVENT_DEV_OPEN, &dev->flags);
2655
2656         netif_start_queue(net);
2657
2658         dev->link_on = false;
2659
2660         lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2661 done:
2662         usb_autopm_put_interface(dev->intf);
2663
2664 out:
2665         return ret;
2666 }
2667
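/* Unlink all pending TX and RX URBs and wait, in UNLINK_TIMEOUT_MS steps, for
 * their completions to drain.
 */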
2668 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2669 {
2670         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2671         DECLARE_WAITQUEUE(wait, current);
2672         int temp;
2673
2674         /* ensure there are no more active urbs */
2675         add_wait_queue(&unlink_wakeup, &wait);
2676         set_current_state(TASK_UNINTERRUPTIBLE);
2677         dev->wait = &unlink_wakeup;
2678         temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2679
2680         /* maybe wait for deletions to finish. */
2681         while (!skb_queue_empty(&dev->rxq) &&
2682                !skb_queue_empty(&dev->txq) &&
2683                !skb_queue_empty(&dev->done)) {
2684                 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2685                 set_current_state(TASK_UNINTERRUPTIBLE);
2686                 netif_dbg(dev, ifdown, dev->net,
2687                           "waited for %d urb completions\n", temp);
2688         }
2689         set_current_state(TASK_RUNNING);
2690         dev->wait = NULL;
2691         remove_wait_queue(&unlink_wakeup, &wait);
2692 }
2693
2694 static int lan78xx_stop(struct net_device *net)
2695 {
2696         struct lan78xx_net              *dev = netdev_priv(net);
2697
2698         if (timer_pending(&dev->stat_monitor))
2699                 del_timer_sync(&dev->stat_monitor);
2700
2701         if (net->phydev)
2702                 phy_stop(net->phydev);
2703
2704         clear_bit(EVENT_DEV_OPEN, &dev->flags);
2705         netif_stop_queue(net);
2706
2707         netif_info(dev, ifdown, dev->net,
2708                    "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2709                    net->stats.rx_packets, net->stats.tx_packets,
2710                    net->stats.rx_errors, net->stats.tx_errors);
2711
2712         lan78xx_terminate_urbs(dev);
2713
2714         usb_kill_urb(dev->urb_intr);
2715
2716         skb_queue_purge(&dev->rxq_pause);
2717
2718         /* deferred work (tasklet, timer, softirq) must also stop.
2719          * we can't flush_scheduled_work() until we drop rtnl (later),
2720          * else workers could deadlock; so make the workers a NOP.
2721          */
2722         dev->flags = 0;
2723         cancel_delayed_work_sync(&dev->wq);
2724         tasklet_kill(&dev->bh);
2725
2726         usb_autopm_put_interface(dev->intf);
2727
2728         return 0;
2729 }
2730
2731 static int lan78xx_linearize(struct sk_buff *skb)
2732 {
2733         return skb_linearize(skb);
2734 }
2735
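/* Prepend the two little-endian TX command words: TX_CMD_A carries the frame
 * length and the FCS/checksum-offload/LSO flags, TX_CMD_B the MSS and the
 * VLAN tag.
 */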
2736 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2737                                        struct sk_buff *skb, gfp_t flags)
2738 {
2739         u32 tx_cmd_a, tx_cmd_b;
2740
2741         if (skb_cow_head(skb, TX_OVERHEAD)) {
2742                 dev_kfree_skb_any(skb);
2743                 return NULL;
2744         }
2745
2746         if (lan78xx_linearize(skb) < 0)
2747                 return NULL;
2748
2749         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2750
2751         if (skb->ip_summed == CHECKSUM_PARTIAL)
2752                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2753
2754         tx_cmd_b = 0;
2755         if (skb_is_gso(skb)) {
2756                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2757
2758                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2759
2760                 tx_cmd_a |= TX_CMD_A_LSO_;
2761         }
2762
2763         if (skb_vlan_tag_present(skb)) {
2764                 tx_cmd_a |= TX_CMD_A_IVTG_;
2765                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2766         }
2767
2768         skb_push(skb, 4);
2769         cpu_to_le32s(&tx_cmd_b);
2770         memcpy(skb->data, &tx_cmd_b, 4);
2771
2772         skb_push(skb, 4);
2773         cpu_to_le32s(&tx_cmd_a);
2774         memcpy(skb->data, &tx_cmd_a, 4);
2775
2776         return skb;
2777 }
2778
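/* Move @skb from its current list to dev->done, record the new state, and
 * schedule the bottom-half tasklet if the done list was previously empty;
 * returns the skb's previous state.
 */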
2779 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2780                                struct sk_buff_head *list, enum skb_state state)
2781 {
2782         unsigned long flags;
2783         enum skb_state old_state;
2784         struct skb_data *entry = (struct skb_data *)skb->cb;
2785
2786         spin_lock_irqsave(&list->lock, flags);
2787         old_state = entry->state;
2788         entry->state = state;
2789
2790         __skb_unlink(skb, list);
2791         spin_unlock(&list->lock);
2792         spin_lock(&dev->done.lock);
2793
2794         __skb_queue_tail(&dev->done, skb);
2795         if (skb_queue_len(&dev->done) == 1)
2796                 tasklet_schedule(&dev->bh);
2797         spin_unlock_irqrestore(&dev->done.lock, flags);
2798
2799         return old_state;
2800 }
2801
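/* TX URB completion handler: update the TX statistics, trigger recovery
 * (EVENT_TX_HALT on -EPIPE, queue stop on protocol errors) and hand the skb
 * to the done list via defer_bh().
 */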
2802 static void tx_complete(struct urb *urb)
2803 {
2804         struct sk_buff *skb = (struct sk_buff *)urb->context;
2805         struct skb_data *entry = (struct skb_data *)skb->cb;
2806         struct lan78xx_net *dev = entry->dev;
2807
2808         if (urb->status == 0) {
2809                 dev->net->stats.tx_packets += entry->num_of_packet;
2810                 dev->net->stats.tx_bytes += entry->length;
2811         } else {
2812                 dev->net->stats.tx_errors++;
2813
2814                 switch (urb->status) {
2815                 case -EPIPE:
2816                         lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2817                         break;
2818
2819                 /* software-driven interface shutdown */
2820                 case -ECONNRESET:
2821                 case -ESHUTDOWN:
2822                         break;
2823
2824                 case -EPROTO:
2825                 case -ETIME:
2826                 case -EILSEQ:
2827                         netif_stop_queue(dev->net);
2828                         break;
2829                 default:
2830                         netif_dbg(dev, tx_err, dev->net,
2831                                   "tx err %d\n", entry->urb->status);
2832                         break;
2833                 }
2834         }
2835
2836         usb_autopm_put_interface_async(dev->intf);
2837
2838         defer_bh(dev, skb, &dev->txq, tx_done);
2839 }
2840
2841 static void lan78xx_queue_skb(struct sk_buff_head *list,
2842                               struct sk_buff *newsk, enum skb_state state)
2843 {
2844         struct skb_data *entry = (struct skb_data *)newsk->cb;
2845
2846         __skb_queue_tail(list, newsk);
2847         entry->state = state;
2848 }
2849
2850 static netdev_tx_t
2851 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2852 {
2853         struct lan78xx_net *dev = netdev_priv(net);
2854         struct sk_buff *skb2 = NULL;
2855
2856         if (skb) {
2857                 skb_tx_timestamp(skb);
2858                 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2859         }
2860
2861         if (skb2) {
2862                 skb_queue_tail(&dev->txq_pend, skb2);
2863
2864                 /* throttle the TX path at speeds slower than SuperSpeed USB */
2865                 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2866                     (skb_queue_len(&dev->txq_pend) > 10))
2867                         netif_stop_queue(net);
2868         } else {
2869                 netif_dbg(dev, tx_err, dev->net,
2870                           "lan78xx_tx_prep return NULL\n");
2871                 dev->net->stats.tx_errors++;
2872                 dev->net->stats.tx_dropped++;
2873         }
2874
2875         tasklet_schedule(&dev->bh);
2876
2877         return NETDEV_TX_OK;
2878 }
2879
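/* Pick the altsetting that provides both a bulk-IN and a bulk-OUT endpoint;
 * an interrupt-IN endpoint, if present, is recorded as the status endpoint.
 */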
2880 static int
2881 lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2882 {
2883         int tmp;
2884         struct usb_host_interface *alt = NULL;
2885         struct usb_host_endpoint *in = NULL, *out = NULL;
2886         struct usb_host_endpoint *status = NULL;
2887
2888         for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2889                 unsigned ep;
2890
2891                 in = NULL;
2892                 out = NULL;
2893                 status = NULL;
2894                 alt = intf->altsetting + tmp;
2895
2896                 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2897                         struct usb_host_endpoint *e;
2898                         int intr = 0;
2899
2900                         e = alt->endpoint + ep;
2901                         switch (e->desc.bmAttributes) {
2902                         case USB_ENDPOINT_XFER_INT:
2903                                 if (!usb_endpoint_dir_in(&e->desc))
2904                                         continue;
2905                                 intr = 1;
2906                                 /* FALLTHROUGH */
2907                         case USB_ENDPOINT_XFER_BULK:
2908                                 break;
2909                         default:
2910                                 continue;
2911                         }
2912                         if (usb_endpoint_dir_in(&e->desc)) {
2913                                 if (!intr && !in)
2914                                         in = e;
2915                                 else if (intr && !status)
2916                                         status = e;
2917                         } else {
2918                                 if (!out)
2919                                         out = e;
2920                         }
2921                 }
2922                 if (in && out)
2923                         break;
2924         }
2925         if (!alt || !in || !out)
2926                 return -EINVAL;
2927
2928         dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2929                                        in->desc.bEndpointAddress &
2930                                        USB_ENDPOINT_NUMBER_MASK);
2931         dev->pipe_out = usb_sndbulkpipe(dev->udev,
2932                                         out->desc.bEndpointAddress &
2933                                         USB_ENDPOINT_NUMBER_MASK);
2934         dev->ep_intr = status;
2935
2936         return 0;
2937 }
2938
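/* Driver bind at probe time: allocate the private data, set the default
 * feature flags, create the IRQ domain, reset the chip and register the
 * MDIO bus.
 */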
2939 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2940 {
2941         struct lan78xx_priv *pdata = NULL;
2942         int ret;
2943         int i;
2944
2945         ret = lan78xx_get_endpoints(dev, intf);
2946
2947         dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2948
2949         pdata = (struct lan78xx_priv *)(dev->data[0]);
2950         if (!pdata) {
2951                 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2952                 return -ENOMEM;
2953         }
2954
2955         pdata->dev = dev;
2956
2957         spin_lock_init(&pdata->rfe_ctl_lock);
2958         mutex_init(&pdata->dataport_mutex);
2959
2960         INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2961
2962         for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2963                 pdata->vlan_table[i] = 0;
2964
2965         INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2966
2967         dev->net->features = 0;
2968
2969         if (DEFAULT_TX_CSUM_ENABLE)
2970                 dev->net->features |= NETIF_F_HW_CSUM;
2971
2972         if (DEFAULT_RX_CSUM_ENABLE)
2973                 dev->net->features |= NETIF_F_RXCSUM;
2974
2975         if (DEFAULT_TSO_CSUM_ENABLE)
2976                 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2977
2978         dev->net->hw_features = dev->net->features;
2979
2980         ret = lan78xx_setup_irq_domain(dev);
2981         if (ret < 0) {
2982                 netdev_warn(dev->net,
2983                             "lan78xx_setup_irq_domain() failed : %d", ret);
2984                 goto out1;
2985         }
2986
2987         dev->net->hard_header_len += TX_OVERHEAD;
2988         dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2989
2990         /* Init all registers */
2991         ret = lan78xx_reset(dev);
2992         if (ret) {
2993                 netdev_warn(dev->net, "Registers INIT FAILED....");
2994                 goto out2;
2995         }
2996
2997         ret = lan78xx_mdio_init(dev);
2998         if (ret) {
2999                 netdev_warn(dev->net, "MDIO init failed: %d", ret);
3000                 goto out2;
3001         }
3002
3003         dev->net->flags |= IFF_MULTICAST;
3004
3005         pdata->wol = WAKE_MAGIC;
3006
3007         return ret;
3008
3009 out2:
3010         lan78xx_remove_irq_domain(dev);
3011
3012 out1:
3013         netdev_warn(dev->net, "Bind routine FAILED");
3014         cancel_work_sync(&pdata->set_multicast);
3015         cancel_work_sync(&pdata->set_vlan);
3016         kfree(pdata);
3017         return ret;
3018 }
3019
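     /* Undo lan78xx_bind(): remove the interrupt domain and MDIO bus,
      * cancel deferred work and free the driver private data.
      */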
3020 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3021 {
3022         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3023
3024         lan78xx_remove_irq_domain(dev);
3025
3026         lan78xx_remove_mdio(dev);
3027
3028         if (pdata) {
3029                 cancel_work_sync(&pdata->set_multicast);
3030                 cancel_work_sync(&pdata->set_vlan);
3031                 netif_dbg(dev, ifdown, dev->net, "free pdata");
3032                 kfree(pdata);
3033                 pdata = NULL;
3034                 dev->data[0] = 0;
3035         }
3036 }
3037
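     /* Fill in the skb checksum state from the RX command words:
      * CHECKSUM_NONE when RX checksum offload is disabled or RX_CMD_A_ICSM_
      * is set, otherwise the hardware checksum with CHECKSUM_COMPLETE.
      */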
3038 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3039                                     struct sk_buff *skb,
3040                                     u32 rx_cmd_a, u32 rx_cmd_b)
3041 {
3042         if (!(dev->net->features & NETIF_F_RXCSUM) ||
3043             unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
3044                 skb->ip_summed = CHECKSUM_NONE;
3045         } else {
3046                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3047                 skb->ip_summed = CHECKSUM_COMPLETE;
3048         }
3049 }
3050
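     /* Deliver a received frame to the stack via netif_rx() and update the
      * RX counters; frames arriving while reception is paused are parked on
      * rxq_pause instead.
      */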
3051 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3052 {
3053         int             status;
3054
3055         if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
3056                 skb_queue_tail(&dev->rxq_pause, skb);
3057                 return;
3058         }
3059
3060         dev->net->stats.rx_packets++;
3061         dev->net->stats.rx_bytes += skb->len;
3062
3063         skb->protocol = eth_type_trans(skb, dev->net);
3064
3065         netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3066                   skb->len + sizeof(struct ethhdr), skb->protocol);
3067         memset(skb->cb, 0, sizeof(struct skb_data));
3068
3069         if (skb_defer_rx_timestamp(skb))
3070                 return;
3071
3072         status = netif_rx(skb);
3073         if (status != NET_RX_SUCCESS)
3074                 netif_dbg(dev, rx_err, dev->net,
3075                           "netif_rx status %d\n", status);
3076 }
3077
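     /* Parse a completed bulk-in buffer: every frame is prefixed by three
      * little-endian RX command words and padded to a 4-byte boundary.
      * All but the last frame are cloned and passed up; the last one is
      * left in the caller's skb.  Returns 0 on a malformed buffer or
      * allocation failure, 1 otherwise.
      */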
3078 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3079 {
3080         if (skb->len < dev->net->hard_header_len)
3081                 return 0;
3082
3083         while (skb->len > 0) {
3084                 u32 rx_cmd_a, rx_cmd_b, align_count, size;
3085                 u16 rx_cmd_c;
3086                 struct sk_buff *skb2;
3087                 unsigned char *packet;
3088
3089                 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
3090                 le32_to_cpus(&rx_cmd_a);
3091                 skb_pull(skb, sizeof(rx_cmd_a));
3092
3093                 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
3094                 le32_to_cpus(&rx_cmd_b);
3095                 skb_pull(skb, sizeof(rx_cmd_b));
3096
3097                 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
3098                 le16_to_cpus(&rx_cmd_c);
3099                 skb_pull(skb, sizeof(rx_cmd_c));
3100
3101                 packet = skb->data;
3102
3103                 /* get the packet length */
3104                 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3105                 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3106
3107                 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3108                         netif_dbg(dev, rx_err, dev->net,
3109                                   "Error rx_cmd_a=0x%08x", rx_cmd_a);
3110                 } else {
3111                         /* last frame in this batch */
3112                         if (skb->len == size) {
3113                                 lan78xx_rx_csum_offload(dev, skb,
3114                                                         rx_cmd_a, rx_cmd_b);
3115
3116                                 skb_trim(skb, skb->len - 4); /* remove fcs */
3117                                 skb->truesize = size + sizeof(struct sk_buff);
3118
3119                                 return 1;
3120                         }
3121
3122                         skb2 = skb_clone(skb, GFP_ATOMIC);
3123                         if (unlikely(!skb2)) {
3124                                 netdev_warn(dev->net, "Error allocating skb");
3125                                 return 0;
3126                         }
3127
3128                         skb2->len = size;
3129                         skb2->data = packet;
3130                         skb_set_tail_pointer(skb2, size);
3131
3132                         lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3133
3134                         skb_trim(skb2, skb2->len - 4); /* remove fcs */
3135                         skb2->truesize = size + sizeof(struct sk_buff);
3136
3137                         lan78xx_skb_return(dev, skb2);
3138                 }
3139
3140                 skb_pull(skb, size);
3141
3142                 /* padding bytes before the next frame starts */
3143                 if (skb->len)
3144                         skb_pull(skb, align_count);
3145         }
3146
3147         return 1;
3148 }
3149
3150 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3151 {
3152         if (!lan78xx_rx(dev, skb)) {
3153                 dev->net->stats.rx_errors++;
3154                 goto done;
3155         }
3156
3157         if (skb->len) {
3158                 lan78xx_skb_return(dev, skb);
3159                 return;
3160         }
3161
3162         netif_dbg(dev, rx_err, dev->net, "drop\n");
3163         dev->net->stats.rx_errors++;
3164 done:
3165         skb_queue_tail(&dev->done, skb);
3166 }
3167
3168 static void rx_complete(struct urb *urb);
3169
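     /* Allocate a receive skb, attach it to the URB and submit the URB on
      * the bulk-in pipe.  On any failure the skb and URB are freed and an
      * error code is returned.
      */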
3170 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3171 {
3172         struct sk_buff *skb;
3173         struct skb_data *entry;
3174         unsigned long lockflags;
3175         size_t size = dev->rx_urb_size;
3176         int ret = 0;
3177
3178         skb = netdev_alloc_skb_ip_align(dev->net, size);
3179         if (!skb) {
3180                 usb_free_urb(urb);
3181                 return -ENOMEM;
3182         }
3183
3184         entry = (struct skb_data *)skb->cb;
3185         entry->urb = urb;
3186         entry->dev = dev;
3187         entry->length = 0;
3188
3189         usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3190                           skb->data, size, rx_complete, skb);
3191
3192         spin_lock_irqsave(&dev->rxq.lock, lockflags);
3193
3194         if (netif_device_present(dev->net) &&
3195             netif_running(dev->net) &&
3196             !test_bit(EVENT_RX_HALT, &dev->flags) &&
3197             !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3198                 ret = usb_submit_urb(urb, GFP_ATOMIC);
3199                 switch (ret) {
3200                 case 0:
3201                         lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3202                         break;
3203                 case -EPIPE:
3204                         lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3205                         break;
3206                 case -ENODEV:
3207                         netif_dbg(dev, ifdown, dev->net, "device gone\n");
3208                         netif_device_detach(dev->net);
3209                         break;
3210                 case -EHOSTUNREACH:
3211                         ret = -ENOLINK;
3212                         break;
3213                 default:
3214                         netif_dbg(dev, rx_err, dev->net,
3215                                   "rx submit, %d\n", ret);
3216                         tasklet_schedule(&dev->bh);
3217                 }
3218         } else {
3219                 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3220                 ret = -ENOLINK;
3221         }
3222         spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3223         if (ret) {
3224                 dev_kfree_skb_any(skb);
3225                 usb_free_urb(urb);
3226         }
3227         return ret;
3228 }
3229
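     /* Bulk-in completion handler: classify the URB status, hand the skb
      * to the bottom half via defer_bh() and resubmit the URB unless the
      * device is being stopped or unlinked.
      */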
3230 static void rx_complete(struct urb *urb)
3231 {
3232         struct sk_buff  *skb = (struct sk_buff *)urb->context;
3233         struct skb_data *entry = (struct skb_data *)skb->cb;
3234         struct lan78xx_net *dev = entry->dev;
3235         int urb_status = urb->status;
3236         enum skb_state state;
3237
3238         skb_put(skb, urb->actual_length);
3239         state = rx_done;
3240         entry->urb = NULL;
3241
3242         switch (urb_status) {
3243         case 0:
3244                 if (skb->len < dev->net->hard_header_len) {
3245                         state = rx_cleanup;
3246                         dev->net->stats.rx_errors++;
3247                         dev->net->stats.rx_length_errors++;
3248                         netif_dbg(dev, rx_err, dev->net,
3249                                   "rx length %d\n", skb->len);
3250                 }
3251                 usb_mark_last_busy(dev->udev);
3252                 break;
3253         case -EPIPE:
3254                 dev->net->stats.rx_errors++;
3255                 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3256                 /* FALLTHROUGH */
3257         case -ECONNRESET:                               /* async unlink */
3258         case -ESHUTDOWN:                                /* hardware gone */
3259                 netif_dbg(dev, ifdown, dev->net,
3260                           "rx shutdown, code %d\n", urb_status);
3261                 state = rx_cleanup;
3262                 entry->urb = urb;
3263                 urb = NULL;
3264                 break;
3265         case -EPROTO:
3266         case -ETIME:
3267         case -EILSEQ:
3268                 dev->net->stats.rx_errors++;
3269                 state = rx_cleanup;
3270                 entry->urb = urb;
3271                 urb = NULL;
3272                 break;
3273
3274         /* data overrun ... flush fifo? */
3275         case -EOVERFLOW:
3276                 dev->net->stats.rx_over_errors++;
3277                 /* FALLTHROUGH */
3278
3279         default:
3280                 state = rx_cleanup;
3281                 dev->net->stats.rx_errors++;
3282                 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3283                 break;
3284         }
3285
3286         state = defer_bh(dev, skb, &dev->rxq, state);
3287
3288         if (urb) {
3289                 if (netif_running(dev->net) &&
3290                     !test_bit(EVENT_RX_HALT, &dev->flags) &&
3291                     state != unlink_start) {
3292                         rx_submit(dev, urb, GFP_ATOMIC);
3293                         return;
3294                 }
3295                 usb_free_urb(urb);
3296         }
3297         netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
3298 }
3299
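     /* Coalesce the packets queued on txq_pend into a single bulk-out URB
      * (a GSO skb is sent on its own) and submit it, deferring the URB to
      * resume time while the device is autosuspended.
      */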
3300 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3301 {
3302         int length;
3303         struct urb *urb = NULL;
3304         struct skb_data *entry;
3305         unsigned long flags;
3306         struct sk_buff_head *tqp = &dev->txq_pend;
3307         struct sk_buff *skb, *skb2;
3308         int ret;
3309         int count, pos;
3310         int skb_totallen, pkt_cnt;
3311
3312         skb_totallen = 0;
3313         pkt_cnt = 0;
3314         count = 0;
3315         length = 0;
3316         for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
3317                 if (skb_is_gso(skb)) {
3318                         if (pkt_cnt) {
3319                                 /* handle previous packets first */
3320                                 break;
3321                         }
3322                         count = 1;
3323                         length = skb->len - TX_OVERHEAD;
3324                         skb2 = skb_dequeue(tqp);
3325                         goto gso_skb;
3326                 }
3327
3328                 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3329                         break;
3330                 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3331                 pkt_cnt++;
3332         }
3333
3334         /* copy to a single skb */
3335         skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3336         if (!skb)
3337                 goto drop;
3338
3339         skb_put(skb, skb_totallen);
3340
3341         for (count = pos = 0; count < pkt_cnt; count++) {
3342                 skb2 = skb_dequeue(tqp);
3343                 if (skb2) {
3344                         length += (skb2->len - TX_OVERHEAD);
3345                         memcpy(skb->data + pos, skb2->data, skb2->len);
3346                         pos += roundup(skb2->len, sizeof(u32));
3347                         dev_kfree_skb(skb2);
3348                 }
3349         }
3350
3351 gso_skb:
3352         urb = usb_alloc_urb(0, GFP_ATOMIC);
3353         if (!urb)
3354                 goto drop;
3355
3356         entry = (struct skb_data *)skb->cb;
3357         entry->urb = urb;
3358         entry->dev = dev;
3359         entry->length = length;
3360         entry->num_of_packet = count;
3361
3362         spin_lock_irqsave(&dev->txq.lock, flags);
3363         ret = usb_autopm_get_interface_async(dev->intf);
3364         if (ret < 0) {
3365                 spin_unlock_irqrestore(&dev->txq.lock, flags);
3366                 goto drop;
3367         }
3368
3369         usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3370                           skb->data, skb->len, tx_complete, skb);
3371
3372         if (length % dev->maxpacket == 0) {
3373                 /* send USB_ZERO_PACKET */
3374                 urb->transfer_flags |= URB_ZERO_PACKET;
3375         }
3376
3377 #ifdef CONFIG_PM
3378         /* if this triggers, the device is still asleep */
3379         if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3380                 /* transmission will be done in resume */
3381                 usb_anchor_urb(urb, &dev->deferred);
3382                 /* no point in processing more packets */
3383                 netif_stop_queue(dev->net);
3384                 usb_put_urb(urb);
3385                 spin_unlock_irqrestore(&dev->txq.lock, flags);
3386                 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3387                 return;
3388         }
3389 #endif
3390
3391         ret = usb_submit_urb(urb, GFP_ATOMIC);
3392         switch (ret) {
3393         case 0:
3394                 netif_trans_update(dev->net);
3395                 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3396                 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3397                         netif_stop_queue(dev->net);
3398                 break;
3399         case -EPIPE:
3400                 netif_stop_queue(dev->net);
3401                 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3402                 usb_autopm_put_interface_async(dev->intf);
3403                 break;
3404         default:
3405                 usb_autopm_put_interface_async(dev->intf);
3406                 netif_dbg(dev, tx_err, dev->net,
3407                           "tx: submit urb err %d\n", ret);
3408                 break;
3409         }
3410
3411         spin_unlock_irqrestore(&dev->txq.lock, flags);
3412
3413         if (ret) {
3414                 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3415 drop:
3416                 dev->net->stats.tx_dropped++;
3417                 if (skb)
3418                         dev_kfree_skb_any(skb);
3419                 usb_free_urb(urb);
3420         } else
3421                 netif_dbg(dev, tx_queued, dev->net,
3422                           "> tx, len %d, type 0x%x\n", length, skb->protocol);
3423 }
3424
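     /* Keep the receive path busy by submitting bulk-in URBs until rx_qlen
      * is reached, and wake the TX queue once it has drained below tx_qlen.
      */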
3425 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3426 {
3427         struct urb *urb;
3428         int i;
3429
3430         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3431                 for (i = 0; i < 10; i++) {
3432                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3433                                 break;
3434                         urb = usb_alloc_urb(0, GFP_ATOMIC);
3435                         if (urb)
3436                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3437                                         return;
3438                 }
3439
3440                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3441                         tasklet_schedule(&dev->bh);
3442         }
3443         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3444                 netif_wake_queue(dev->net);
3445 }
3446
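     /* Bottom-half tasklet: drain dev->done, processing completed RX
      * buffers and releasing finished TX entries, then refill the RX ring
      * and restart pending transmissions while the interface is running.
      */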
3447 static void lan78xx_bh(unsigned long param)
3448 {
3449         struct lan78xx_net *dev = (struct lan78xx_net *)param;
3450         struct sk_buff *skb;
3451         struct skb_data *entry;
3452
3453         while ((skb = skb_dequeue(&dev->done))) {
3454                 entry = (struct skb_data *)(skb->cb);
3455                 switch (entry->state) {
3456                 case rx_done:
3457                         entry->state = rx_cleanup;
3458                         rx_process(dev, skb);
3459                         continue;
3460                 case tx_done:
3461                         usb_free_urb(entry->urb);
3462                         dev_kfree_skb(skb);
3463                         continue;
3464                 case rx_cleanup:
3465                         usb_free_urb(entry->urb);
3466                         dev_kfree_skb(skb);
3467                         continue;
3468                 default:
3469                         netdev_dbg(dev->net, "skb state %d\n", entry->state);
3470                         return;
3471                 }
3472         }
3473
3474         if (netif_device_present(dev->net) && netif_running(dev->net)) {
3475                 /* reset update timer delta */
3476                 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3477                         dev->delta = 1;
3478                         mod_timer(&dev->stat_monitor,
3479                                   jiffies + STAT_UPDATE_TIMER);
3480                 }
3481
3482                 if (!skb_queue_empty(&dev->txq_pend))
3483                         lan78xx_tx_bh(dev);
3484
3485                 if (!timer_pending(&dev->delay) &&
3486                     !test_bit(EVENT_RX_HALT, &dev->flags))
3487                         lan78xx_rx_bh(dev);
3488         }
3489 }
3490
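     /* Deferred keventd work: recover stalled bulk endpoints (EVENT_TX_HALT
      * and EVENT_RX_HALT), handle link resets and refresh the hardware
      * statistics.
      */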
3491 static void lan78xx_delayedwork(struct work_struct *work)
3492 {
3493         int status;
3494         struct lan78xx_net *dev;
3495
3496         dev = container_of(work, struct lan78xx_net, wq.work);
3497
3498         if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3499                 unlink_urbs(dev, &dev->txq);
3500                 status = usb_autopm_get_interface(dev->intf);
3501                 if (status < 0)
3502                         goto fail_pipe;
3503                 status = usb_clear_halt(dev->udev, dev->pipe_out);
3504                 usb_autopm_put_interface(dev->intf);
3505                 if (status < 0 &&
3506                     status != -EPIPE &&
3507                     status != -ESHUTDOWN) {
3508                         if (netif_msg_tx_err(dev))
3509 fail_pipe:
3510                                 netdev_err(dev->net,
3511                                            "can't clear tx halt, status %d\n",
3512                                            status);
3513                 } else {
3514                         clear_bit(EVENT_TX_HALT, &dev->flags);
3515                         if (status != -ESHUTDOWN)
3516                                 netif_wake_queue(dev->net);
3517                 }
3518         }
3519         if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3520                 unlink_urbs(dev, &dev->rxq);
3521                 status = usb_autopm_get_interface(dev->intf);
3522                 if (status < 0)
3523                         goto fail_halt;
3524                 status = usb_clear_halt(dev->udev, dev->pipe_in);
3525                 usb_autopm_put_interface(dev->intf);
3526                 if (status < 0 &&
3527                     status != -EPIPE &&
3528                     status != -ESHUTDOWN) {
3529                         if (netif_msg_rx_err(dev))
3530 fail_halt:
3531                                 netdev_err(dev->net,
3532                                            "can't clear rx halt, status %d\n",
3533                                            status);
3534                 } else {
3535                         clear_bit(EVENT_RX_HALT, &dev->flags);
3536                         tasklet_schedule(&dev->bh);
3537                 }
3538         }
3539
3540         if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3541                 int ret = 0;
3542
3543                 clear_bit(EVENT_LINK_RESET, &dev->flags);
3544                 status = usb_autopm_get_interface(dev->intf);
3545                 if (status < 0)
3546                         goto skip_reset;
3547                 ret = lan78xx_link_reset(dev);
                     if (ret < 0) {
3548                         usb_autopm_put_interface(dev->intf);
3549 skip_reset:
3550                         netdev_info(dev->net, "link reset failed (%d)\n",
3551                                     ret);
3552                 } else {
3553                         usb_autopm_put_interface(dev->intf);
3554                 }
3555         }
3556
3557         if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3558                 lan78xx_update_stats(dev);
3559
3560                 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3561
3562                 mod_timer(&dev->stat_monitor,
3563                           jiffies + (STAT_UPDATE_TIMER * dev->delta));
3564
3565                 dev->delta = min((dev->delta * 2), 50);
3566         }
3567 }
3568
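     /* Interrupt endpoint completion: pass status notifications to
      * lan78xx_status() and resubmit the URB while the interface is up.
      */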
3569 static void intr_complete(struct urb *urb)
3570 {
3571         struct lan78xx_net *dev = urb->context;
3572         int status = urb->status;
3573
3574         switch (status) {
3575         /* success */
3576         case 0:
3577                 lan78xx_status(dev, urb);
3578                 break;
3579
3580         /* software-driven interface shutdown */
3581         case -ENOENT:                   /* urb killed */
3582         case -ESHUTDOWN:                /* hardware gone */
3583                 netif_dbg(dev, ifdown, dev->net,
3584                           "intr shutdown, code %d\n", status);
3585                 return;
3586
3587         /* NOTE:  not throttling like RX/TX, since this endpoint
3588          * already polls infrequently
3589          */
3590         default:
3591                 netdev_dbg(dev->net, "intr status %d\n", status);
3592                 break;
3593         }
3594
3595         if (!netif_running(dev->net))
3596                 return;
3597
3598         memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3599         status = usb_submit_urb(urb, GFP_ATOMIC);
3600         if (status != 0)
3601                 netif_err(dev, timer, dev->net,
3602                           "intr resubmit --> %d\n", status);
3603 }
3604
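     /* Disconnect: remove the PHY fixups, detach the PHY, unregister the
      * netdev and release all outstanding URBs and driver state.
      */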
3605 static void lan78xx_disconnect(struct usb_interface *intf)
3606 {
3607         struct lan78xx_net              *dev;
3608         struct usb_device               *udev;
3609         struct net_device               *net;
3610         struct phy_device               *phydev;
3611
3612         dev = usb_get_intfdata(intf);
3613         usb_set_intfdata(intf, NULL);
3614         if (!dev)
3615                 return;
3616
3617         udev = interface_to_usbdev(intf);
3618         net = dev->net;
3619         phydev = net->phydev;
3620
3621         phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3622         phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3623
3624         phy_disconnect(net->phydev);
3625
3626         if (phy_is_pseudo_fixed_link(phydev))
3627                 fixed_phy_unregister(phydev);
3628
3629         unregister_netdev(net);
3630
3631         cancel_delayed_work_sync(&dev->wq);
3632
3633         usb_scuttle_anchored_urbs(&dev->deferred);
3634
3635         lan78xx_unbind(dev, intf);
3636
3637         usb_kill_urb(dev->urb_intr);
3638         usb_free_urb(dev->urb_intr);
3639
3640         free_netdev(net);
3641         usb_put_dev(udev);
3642 }
3643
3644 static void lan78xx_tx_timeout(struct net_device *net)
3645 {
3646         struct lan78xx_net *dev = netdev_priv(net);
3647
3648         unlink_urbs(dev, &dev->txq);
3649         tasklet_schedule(&dev->bh);
3650 }
3651
3652 static const struct net_device_ops lan78xx_netdev_ops = {
3653         .ndo_open               = lan78xx_open,
3654         .ndo_stop               = lan78xx_stop,
3655         .ndo_start_xmit         = lan78xx_start_xmit,
3656         .ndo_tx_timeout         = lan78xx_tx_timeout,
3657         .ndo_change_mtu         = lan78xx_change_mtu,
3658         .ndo_set_mac_address    = lan78xx_set_mac_addr,
3659         .ndo_validate_addr      = eth_validate_addr,
3660         .ndo_do_ioctl           = lan78xx_ioctl,
3661         .ndo_set_rx_mode        = lan78xx_set_multicast,
3662         .ndo_set_features       = lan78xx_set_features,
3663         .ndo_vlan_rx_add_vid    = lan78xx_vlan_rx_add_vid,
3664         .ndo_vlan_rx_kill_vid   = lan78xx_vlan_rx_kill_vid,
3665 };
3666
3667 static void lan78xx_stat_monitor(struct timer_list *t)
3668 {
3669         struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3670
3671         lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3672 }
3673
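     /* Probe: allocate the netdev, bind to the USB interface, set up the
      * bulk and interrupt pipes, register the netdev and initialise the PHY.
      */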
3674 static int lan78xx_probe(struct usb_interface *intf,
3675                          const struct usb_device_id *id)
3676 {
3677         struct lan78xx_net *dev;
3678         struct net_device *netdev;
3679         struct usb_device *udev;
3680         int ret;
3681         unsigned maxp;
3682         unsigned period;
3683         u8 *buf = NULL;
3684
3685         udev = interface_to_usbdev(intf);
3686         udev = usb_get_dev(udev);
3687
3688         netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3689         if (!netdev) {
3690                 dev_err(&intf->dev, "Error: OOM\n");
3691                 ret = -ENOMEM;
3692                 goto out1;
3693         }
3694
3695         /* netdev_printk() needs this */
3696         SET_NETDEV_DEV(netdev, &intf->dev);
3697
3698         dev = netdev_priv(netdev);
3699         dev->udev = udev;
3700         dev->intf = intf;
3701         dev->net = netdev;
3702         dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3703                                         | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3704
3705         skb_queue_head_init(&dev->rxq);
3706         skb_queue_head_init(&dev->txq);
3707         skb_queue_head_init(&dev->done);
3708         skb_queue_head_init(&dev->rxq_pause);
3709         skb_queue_head_init(&dev->txq_pend);
3710         mutex_init(&dev->phy_mutex);
3711
3712         tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3713         INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3714         init_usb_anchor(&dev->deferred);
3715
3716         netdev->netdev_ops = &lan78xx_netdev_ops;
3717         netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3718         netdev->ethtool_ops = &lan78xx_ethtool_ops;
3719
3720         dev->delta = 1;
3721         timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3722
3723         mutex_init(&dev->stats.access_lock);
3724
3725         ret = lan78xx_bind(dev, intf);
3726         if (ret < 0)
3727                 goto out2;
3728         strcpy(netdev->name, "eth%d");
3729
3730         if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3731                 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3732
3733         /* MTU range: 68 - 9000 */
3734         netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3735
             /* the driver expects bulk-in, bulk-out and interrupt endpoints;
              * bail out if the descriptor does not list all three
              */
             if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
                     ret = -ENODEV;
                     goto out3;
             }

3736         dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3737         dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3738         dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3739
3740         dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3741         dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3742
3743         dev->pipe_intr = usb_rcvintpipe(dev->udev,
3744                                         dev->ep_intr->desc.bEndpointAddress &
3745                                         USB_ENDPOINT_NUMBER_MASK);
3746         period = dev->ep_intr->desc.bInterval;
3747
3748         maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3749         buf = kmalloc(maxp, GFP_KERNEL);
3750         if (buf) {
3751                 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3752                 if (!dev->urb_intr) {
3753                         ret = -ENOMEM;
3754                         kfree(buf);
3755                         goto out3;
3756                 } else {
3757                         usb_fill_int_urb(dev->urb_intr, dev->udev,
3758                                          dev->pipe_intr, buf, maxp,
3759                                          intr_complete, dev, period);
3760                 }
3761         }
3762
3763         dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3764
3765         /* driver requires remote-wakeup capability during autosuspend. */
3766         intf->needs_remote_wakeup = 1;
3767
3768         ret = register_netdev(netdev);
3769         if (ret != 0) {
3770                 netif_err(dev, probe, netdev, "couldn't register the device\n");
3771                 goto out3;
3772         }
3773
3774         usb_set_intfdata(intf, dev);
3775
3776         ret = device_set_wakeup_enable(&udev->dev, true);
3777
3778         /* The default autosuspend delay of 2 seconds has more overhead
3779          * than benefit; use 10 seconds instead.
3780          */
3781         pm_runtime_set_autosuspend_delay(&udev->dev,
3782                                          DEFAULT_AUTOSUSPEND_DELAY);
3783
3784         ret = lan78xx_phy_init(dev);
3785         if (ret < 0)
3786                 goto out4;
3787
3788         return 0;
3789
3790 out4:
3791         unregister_netdev(netdev);
3792 out3:
3793         lan78xx_unbind(dev, intf);
3794 out2:
3795         free_netdev(netdev);
3796 out1:
3797         usb_put_dev(udev);
3798
3799         return ret;
3800 }
3801
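     /* Bit-serial CRC-16 (polynomial 0x8005) over a wakeup-frame byte
      * pattern, as programmed into the WUF_CFGx registers.
      */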
3802 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3803 {
3804         const u16 crc16poly = 0x8005;
3805         int i;
3806         u16 bit, crc, msb;
3807         u8 data;
3808
3809         crc = 0xFFFF;
3810         for (i = 0; i < len; i++) {
3811                 data = *buf++;
3812                 for (bit = 0; bit < 8; bit++) {
3813                         msb = crc >> 15;
3814                         crc <<= 1;
3815
3816                         if (msb ^ (u16)(data & 1)) {
3817                                 crc ^= crc16poly;
3818                                 crc |= (u16)0x0001U;
3819                         }
3820                         data >>= 1;
3821                 }
3822         }
3823
3824         return crc;
3825 }
3826
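     /* Program the wakeup-frame filters, WUCSR and the PMT_CTL suspend mode
      * matching the requested Wake-on-LAN flags, then re-enable the receiver.
      */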
3827 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3828 {
3829         u32 buf;
3830         int ret;
3831         int mask_index;
3832         u16 crc;
3833         u32 temp_wucsr;
3834         u32 temp_pmt_ctl;
3835         const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3836         const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3837         const u8 arp_type[2] = { 0x08, 0x06 };
3838
3839         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3840         buf &= ~MAC_TX_TXEN_;
3841         ret = lan78xx_write_reg(dev, MAC_TX, buf);
3842         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3843         buf &= ~MAC_RX_RXEN_;
3844         ret = lan78xx_write_reg(dev, MAC_RX, buf);
3845
3846         ret = lan78xx_write_reg(dev, WUCSR, 0);
3847         ret = lan78xx_write_reg(dev, WUCSR2, 0);
3848         ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3849
3850         temp_wucsr = 0;
3851
3852         temp_pmt_ctl = 0;
3853         ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3854         temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3855         temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3856
3857         for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3858                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3859
3860         mask_index = 0;
3861         if (wol & WAKE_PHY) {
3862                 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3863
3864                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3865                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3866                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3867         }
3868         if (wol & WAKE_MAGIC) {
3869                 temp_wucsr |= WUCSR_MPEN_;
3870
3871                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3872                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3873                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3874         }
3875         if (wol & WAKE_BCAST) {
3876                 temp_wucsr |= WUCSR_BCST_EN_;
3877
3878                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3879                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3880                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3881         }
3882         if (wol & WAKE_MCAST) {
3883                 temp_wucsr |= WUCSR_WAKE_EN_;
3884
3885                 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3886                 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3887                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3888                                         WUF_CFGX_EN_ |
3889                                         WUF_CFGX_TYPE_MCAST_ |
3890                                         (0 << WUF_CFGX_OFFSET_SHIFT_) |
3891                                         (crc & WUF_CFGX_CRC16_MASK_));
3892
3893                 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3894                 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3895                 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3896                 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3897                 mask_index++;
3898
3899                 /* for IPv6 Multicast */
3900                 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3901                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3902                                         WUF_CFGX_EN_ |
3903                                         WUF_CFGX_TYPE_MCAST_ |
3904                                         (0 << WUF_CFGX_OFFSET_SHIFT_) |
3905                                         (crc & WUF_CFGX_CRC16_MASK_));
3906
3907                 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3908                 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3909                 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3910                 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3911                 mask_index++;
3912
3913                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3914                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3915                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3916         }
3917         if (wol & WAKE_UCAST) {
3918                 temp_wucsr |= WUCSR_PFDA_EN_;
3919
3920                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3921                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3922                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3923         }
3924         if (wol & WAKE_ARP) {
3925                 temp_wucsr |= WUCSR_WAKE_EN_;
3926
3927                 /* set WUF_CFG & WUF_MASK
3928                  * for packet type (offset 12,13) = ARP (0x0806)
3929                  */
3930                 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3931                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3932                                         WUF_CFGX_EN_ |
3933                                         WUF_CFGX_TYPE_ALL_ |
3934                                         (0 << WUF_CFGX_OFFSET_SHIFT_) |
3935                                         (crc & WUF_CFGX_CRC16_MASK_));
3936
3937                 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3938                 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3939                 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3940                 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3941                 mask_index++;
3942
3943                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3944                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3945                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3946         }
3947
3948         ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3949
3950         /* when multiple WOL bits are set */
3951         if (hweight_long((unsigned long)wol) > 1) {
3952                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3953                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3954                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3955         }
3956         ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3957
3958         /* clear WUPS */
3959         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3960         buf |= PMT_CTL_WUPS_MASK_;
3961         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3962
3963         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3964         buf |= MAC_RX_RXEN_;
3965         ret = lan78xx_write_reg(dev, MAC_RX, buf);
3966
3967         return 0;
3968 }
3969
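     /* Suspend: on the first suspend stop the MAC and quiesce all URBs,
      * then arm either good-frame wakeup (autosuspend) or the configured
      * Wake-on-LAN settings.
      */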
3970 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3971 {
3972         struct lan78xx_net *dev = usb_get_intfdata(intf);
3973         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3974         u32 buf;
3975         int ret;
3976         int event;
3977
3978         event = message.event;
3979
3980         if (!dev->suspend_count++) {
3981                 spin_lock_irq(&dev->txq.lock);
3982                 /* don't autosuspend while transmitting */
3983                 if ((skb_queue_len(&dev->txq) ||
3984                      skb_queue_len(&dev->txq_pend)) &&
3985                         PMSG_IS_AUTO(message)) {
3986                         spin_unlock_irq(&dev->txq.lock);
3987                         ret = -EBUSY;
3988                         goto out;
3989                 } else {
3990                         set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3991                         spin_unlock_irq(&dev->txq.lock);
3992                 }
3993
3994                 /* stop TX & RX */
3995                 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3996                 buf &= ~MAC_TX_TXEN_;
3997                 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3998                 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3999                 buf &= ~MAC_RX_RXEN_;
4000                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
4001
4002                 /* empty out the Rx and Tx queues */
4003                 netif_device_detach(dev->net);
4004                 lan78xx_terminate_urbs(dev);
4005                 usb_kill_urb(dev->urb_intr);
4006
4007                 /* reattach */
4008                 netif_device_attach(dev->net);
4009         }
4010
4011         if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4012                 del_timer(&dev->stat_monitor);
4013
4014                 if (PMSG_IS_AUTO(message)) {
4015                         /* auto suspend (selective suspend) */
4016                         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4017                         buf &= ~MAC_TX_TXEN_;
4018                         ret = lan78xx_write_reg(dev, MAC_TX, buf);
4019                         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4020                         buf &= ~MAC_RX_RXEN_;
4021                         ret = lan78xx_write_reg(dev, MAC_RX, buf);
4022
4023                         ret = lan78xx_write_reg(dev, WUCSR, 0);
4024                         ret = lan78xx_write_reg(dev, WUCSR2, 0);
4025                         ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4026
4027                         /* set goodframe wakeup */
4028                         ret = lan78xx_read_reg(dev, WUCSR, &buf);
4029
4030                         buf |= WUCSR_RFE_WAKE_EN_;
4031                         buf |= WUCSR_STORE_WAKE_;
4032
4033                         ret = lan78xx_write_reg(dev, WUCSR, buf);
4034
4035                         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4036
4037                         buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4038                         buf |= PMT_CTL_RES_CLR_WKP_STS_;
4039
4040                         buf |= PMT_CTL_PHY_WAKE_EN_;
4041                         buf |= PMT_CTL_WOL_EN_;
4042                         buf &= ~PMT_CTL_SUS_MODE_MASK_;
4043                         buf |= PMT_CTL_SUS_MODE_3_;
4044
4045                         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4046
4047                         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4048
4049                         buf |= PMT_CTL_WUPS_MASK_;
4050
4051                         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4052
4053                         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4054                         buf |= MAC_RX_RXEN_;
4055                         ret = lan78xx_write_reg(dev, MAC_RX, buf);
4056                 } else {
4057                         lan78xx_set_suspend(dev, pdata->wol);
4058                 }
4059         }
4060
4061         ret = 0;
4062 out:
4063         return ret;
4064 }
4065
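     /* Resume: restart the interrupt URB and any TX URBs deferred during
      * suspend, clear the wakeup status registers and re-enable the
      * transmitter.
      */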
4066 static int lan78xx_resume(struct usb_interface *intf)
4067 {
4068         struct lan78xx_net *dev = usb_get_intfdata(intf);
4069         struct sk_buff *skb;
4070         struct urb *res;
4071         int ret;
4072         u32 buf;
4073
4074         if (!timer_pending(&dev->stat_monitor)) {
4075                 dev->delta = 1;
4076                 mod_timer(&dev->stat_monitor,
4077                           jiffies + STAT_UPDATE_TIMER);
4078         }
4079
4080         if (!--dev->suspend_count) {
4081                 /* resume interrupt URBs */
4082                 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
4083                         usb_submit_urb(dev->urb_intr, GFP_NOIO);
4084
4085                 spin_lock_irq(&dev->txq.lock);
4086                 while ((res = usb_get_from_anchor(&dev->deferred))) {
4087                         skb = (struct sk_buff *)res->context;
4088                         ret = usb_submit_urb(res, GFP_ATOMIC);
4089                         if (ret < 0) {
4090                                 dev_kfree_skb_any(skb);
4091                                 usb_free_urb(res);
4092                                 usb_autopm_put_interface_async(dev->intf);
4093                         } else {
4094                                 netif_trans_update(dev->net);
4095                                 lan78xx_queue_skb(&dev->txq, skb, tx_start);
4096                         }
4097                 }
4098
4099                 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4100                 spin_unlock_irq(&dev->txq.lock);
4101
4102                 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
4103                         if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
4104                                 netif_start_queue(dev->net);
4105                         tasklet_schedule(&dev->bh);
4106                 }
4107         }
4108
4109         ret = lan78xx_write_reg(dev, WUCSR2, 0);
4110         ret = lan78xx_write_reg(dev, WUCSR, 0);
4111         ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4112
4113         ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4114                                              WUCSR2_ARP_RCD_ |
4115                                              WUCSR2_IPV6_TCPSYN_RCD_ |
4116                                              WUCSR2_IPV4_TCPSYN_RCD_);
4117
4118         ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
4119                                             WUCSR_EEE_RX_WAKE_ |
4120                                             WUCSR_PFDA_FR_ |
4121                                             WUCSR_RFE_WAKE_FR_ |
4122                                             WUCSR_WUFR_ |
4123                                             WUCSR_MPR_ |
4124                                             WUCSR_BCST_FR_);
4125
4126         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4127         buf |= MAC_TX_TXEN_;
4128         ret = lan78xx_write_reg(dev, MAC_TX, buf);
4129
4130         return 0;
4131 }
4132
4133 static int lan78xx_reset_resume(struct usb_interface *intf)
4134 {
4135         struct lan78xx_net *dev = usb_get_intfdata(intf);
4136
4137         lan78xx_reset(dev);
4138
4139         phy_start(dev->net->phydev);
4140
4141         return lan78xx_resume(intf);
4142 }
4143
4144 static const struct usb_device_id products[] = {
4145         {
4146         /* LAN7800 USB Gigabit Ethernet Device */
4147         USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4148         },
4149         {
4150         /* LAN7850 USB Gigabit Ethernet Device */
4151         USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4152         },
4153         {
4154         /* LAN7801 USB Gigabit Ethernet Device */
4155         USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
4156         },
4157         {},
4158 };
4159 MODULE_DEVICE_TABLE(usb, products);
4160
4161 static struct usb_driver lan78xx_driver = {
4162         .name                   = DRIVER_NAME,
4163         .id_table               = products,
4164         .probe                  = lan78xx_probe,
4165         .disconnect             = lan78xx_disconnect,
4166         .suspend                = lan78xx_suspend,
4167         .resume                 = lan78xx_resume,
4168         .reset_resume           = lan78xx_reset_resume,
4169         .supports_autosuspend   = 1,
4170         .disable_hub_initiated_lpm = 1,
4171 };
4172
4173 module_usb_driver(lan78xx_driver);
4174
4175 MODULE_AUTHOR(DRIVER_AUTHOR);
4176 MODULE_DESCRIPTION(DRIVER_DESC);
4177 MODULE_LICENSE("GPL");