GNU Linux-libre 4.9.294-gnu1
[releases.git] / drivers / net / usb / lan78xx.c
1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <net/ip6_checksum.h>
33 #include <net/vxlan.h>
34 #include <linux/microchipphy.h>
35 #include <linux/of_net.h>
36 #include "lan78xx.h"
37
#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"
#define DRIVER_VERSION	"1.0.4"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

/* bulk endpoint max packet size for each USB bus speed */
#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)
#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define TX_OVERHEAD			(8)
#define RXW_PADDING			2

#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)

/* direction selector for PHY register access (see mii_access()) */
#define MII_READ			1
#define MII_WRITE			0

/* first EEPROM byte must read EEPROM_INDICATOR for contents to be trusted */
#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (mSec)*/
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistic update interval (mSec) */
#define STAT_UPDATE_TIMER		(1 * 1000)
93
/* ethtool statistics names; order must match the field order of
 * struct lan78xx_statstage (and lan78xx_statstage64) below.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
143
/* Raw hardware statistics counters, exactly as returned by the
 * USB_VENDOR_REQUEST_GET_STATS control transfer (see lan78xx_read_stats()).
 * All counters are 32-bit and can wrap; rollover is tracked separately
 * in struct statstage.  Field order must match lan78xx_gstrings.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
193
/* 64-bit accumulated statistics (raw counter plus recorded rollovers,
 * computed in lan78xx_update_stats()).  Field order must mirror
 * struct lan78xx_statstage one-for-one, since the update loop treats
 * both structs as flat parallel arrays.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
243
struct lan78xx_net;

/* Driver-private state hung off the net_device (receive-filter engine
 * configuration plus the deferred work that programs it).
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;	/* wake-on-LAN option bits */
};
258
/* Lifecycle state of a URB-backed skb as it moves through the driver's
 * tx/rx/done queues.
 */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

/* Per-skb bookkeeping stored in skb->cb (must fit within its 48 bytes). */
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};
276
/* Pairs a USB control request with the device it targets. */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};

/* Deferred-event bit numbers — presumably set in lan78xx_net::flags and
 * serviced outside atomic context; the handlers are elsewhere in this file.
 */
#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9

/* Statistics bookkeeping: last raw snapshot, per-counter rollover counts,
 * per-counter maximum values, and the accumulated 64-bit totals.
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};
300
/* Main per-device state for a LAN78xx USB adapter. */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;	/* points to struct lan78xx_priv */

	int			rx_qlen;
	int			tx_qlen;
	struct sk_buff_head	rxq;
	struct sk_buff_head	txq;
	struct sk_buff_head	done;
	struct sk_buff_head	rxq_pause;
	struct sk_buff_head	txq_pend;

	struct tasklet_struct	bh;
	struct delayed_work	wq;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;		/* EVENT_* bit numbers */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;
	struct timer_list	stat_monitor;	/* periodic stats refresh */

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;		/* ID_REV_CHIP_ID_78x0_ */
	u32			chiprev;
	struct mii_bus		*mdiobus;

	int			fc_autoneg;	/* flow-control autonegotiation */
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;
};
353
/* Module-wide default for netif message level; -1 keeps the driver default.
 * Use ethtool to change the level for any given device at runtime.
 */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
358
359 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
360 {
361         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
362         int ret;
363
364         if (!buf)
365                 return -ENOMEM;
366
367         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
368                               USB_VENDOR_REQUEST_READ_REGISTER,
369                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
370                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
371         if (likely(ret >= 0)) {
372                 le32_to_cpus(buf);
373                 *data = *buf;
374         } else {
375                 netdev_warn(dev->net,
376                             "Failed to read register index 0x%08x. ret = %d",
377                             index, ret);
378         }
379
380         kfree(buf);
381
382         return ret;
383 }
384
385 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
386 {
387         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
388         int ret;
389
390         if (!buf)
391                 return -ENOMEM;
392
393         *buf = data;
394         cpu_to_le32s(buf);
395
396         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
397                               USB_VENDOR_REQUEST_WRITE_REGISTER,
398                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
399                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
400         if (unlikely(ret < 0)) {
401                 netdev_warn(dev->net,
402                             "Failed to write register index 0x%08x. ret = %d",
403                             index, ret);
404         }
405
406         kfree(buf);
407
408         return ret;
409 }
410
411 static int lan78xx_read_stats(struct lan78xx_net *dev,
412                               struct lan78xx_statstage *data)
413 {
414         int ret = 0;
415         int i;
416         struct lan78xx_statstage *stats;
417         u32 *src;
418         u32 *dst;
419
420         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
421         if (!stats)
422                 return -ENOMEM;
423
424         ret = usb_control_msg(dev->udev,
425                               usb_rcvctrlpipe(dev->udev, 0),
426                               USB_VENDOR_REQUEST_GET_STATS,
427                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
428                               0,
429                               0,
430                               (void *)stats,
431                               sizeof(*stats),
432                               USB_CTRL_SET_TIMEOUT);
433         if (likely(ret >= 0)) {
434                 src = (u32 *)stats;
435                 dst = (u32 *)data;
436                 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
437                         le32_to_cpus(&src[i]);
438                         dst[i] = src[i];
439                 }
440         } else {
441                 netdev_warn(dev->net,
442                             "Failed to read stat ret = %d", ret);
443         }
444
445         kfree(stats);
446
447         return ret;
448 }
449
/* If the freshly-read counter is smaller than the previous snapshot the
 * 32-bit hardware counter wrapped; count the rollover.  NOTE(review): the
 * macro body is a bare braced block, not do { } while (0) — safe only
 * because every call site uses it as a stand-alone statement.
 */
#define check_counter_rollover(struct1, dev_stats, member) {	\
	if (struct1->member < dev_stats.saved.member)		\
		dev_stats.rollover_count.member++;		\
	}
454
/* Compare every counter in @stats against the previous snapshot to detect
 * 32-bit wraparound, then record @stats as the new snapshot.  The caller
 * (lan78xx_update_stats()) holds dev->stats.access_lock.
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	/* remember this reading for the next rollover comparison */
	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
508
/* Refresh dev->stats.curr_stat: read the raw hardware counters, detect
 * rollovers, and fold rollover counts into 64-bit running totals.  Holds a
 * runtime-PM reference across the USB access; silently returns if the
 * device cannot be resumed.
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	/* treat the stat structures as parallel flat arrays of counters */
	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	/* NOTE(review): if the read fails, lan78xx_stats stays uninitialized
	 * yet is still accumulated below — looks like a latent bug; confirm.
	 */
	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	/* total = raw + rollovers * (counter_max + 1) */
	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
536
537 /* Loop until the read is completed with timeout called with phy_mutex held */
538 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
539 {
540         unsigned long start_time = jiffies;
541         u32 val;
542         int ret;
543
544         do {
545                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
546                 if (unlikely(ret < 0))
547                         return -EIO;
548
549                 if (!(val & MII_ACC_MII_BUSY_))
550                         return 0;
551         } while (!time_after(jiffies, start_time + HZ));
552
553         return -EIO;
554 }
555
556 static inline u32 mii_access(int id, int index, int read)
557 {
558         u32 ret;
559
560         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
561         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
562         if (read)
563                 ret |= MII_ACC_MII_READ_;
564         else
565                 ret |= MII_ACC_MII_WRITE_;
566         ret |= MII_ACC_MII_BUSY_;
567
568         return ret;
569 }
570
571 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
572 {
573         unsigned long start_time = jiffies;
574         u32 val;
575         int ret;
576
577         do {
578                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
579                 if (unlikely(ret < 0))
580                         return -EIO;
581
582                 if (!(val & E2P_CMD_EPC_BUSY_) ||
583                     (val & E2P_CMD_EPC_TIMEOUT_))
584                         break;
585                 usleep_range(40, 100);
586         } while (!time_after(jiffies, start_time + HZ));
587
588         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
589                 netdev_warn(dev->net, "EEPROM read operation timeout");
590                 return -EIO;
591         }
592
593         return 0;
594 }
595
596 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
597 {
598         unsigned long start_time = jiffies;
599         u32 val;
600         int ret;
601
602         do {
603                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
604                 if (unlikely(ret < 0))
605                         return -EIO;
606
607                 if (!(val & E2P_CMD_EPC_BUSY_))
608                         return 0;
609
610                 usleep_range(40, 100);
611         } while (!time_after(jiffies, start_time + HZ));
612
613         netdev_warn(dev->net, "EEPROM is busy");
614         return -EIO;
615 }
616
617 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
618                                    u32 length, u8 *data)
619 {
620         u32 val;
621         u32 saved;
622         int i, ret;
623         int retval;
624
625         /* depends on chip, some EEPROM pins are muxed with LED function.
626          * disable & restore LED function to access EEPROM.
627          */
628         ret = lan78xx_read_reg(dev, HW_CFG, &val);
629         saved = val;
630         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
631                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
632                 ret = lan78xx_write_reg(dev, HW_CFG, val);
633         }
634
635         retval = lan78xx_eeprom_confirm_not_busy(dev);
636         if (retval)
637                 return retval;
638
639         for (i = 0; i < length; i++) {
640                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
641                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
642                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
643                 if (unlikely(ret < 0)) {
644                         retval = -EIO;
645                         goto exit;
646                 }
647
648                 retval = lan78xx_wait_eeprom(dev);
649                 if (retval < 0)
650                         goto exit;
651
652                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
653                 if (unlikely(ret < 0)) {
654                         retval = -EIO;
655                         goto exit;
656                 }
657
658                 data[i] = val & 0xFF;
659                 offset++;
660         }
661
662         retval = 0;
663 exit:
664         if (dev->chipid == ID_REV_CHIP_ID_7800_)
665                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
666
667         return retval;
668 }
669
670 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
671                                u32 length, u8 *data)
672 {
673         u8 sig;
674         int ret;
675
676         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
677         if ((ret == 0) && (sig == EEPROM_INDICATOR))
678                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
679         else
680                 ret = -EINVAL;
681
682         return ret;
683 }
684
/* Write @length bytes from @data to the configuration EEPROM starting at
 * @offset, one byte per WRITE command.  Returns 0 on success or a negative
 * error code.  As with reads, the LED function is disabled in HW_CFG on
 * LAN7800 for the duration and restored on every exit path.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* wait for this byte's write cycle to finish */
		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* restore the LED configuration saved on entry */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
751
/* Read @length bytes from the on-chip OTP memory starting at @offset.
 * Powers the OTP block up first if it is in power-down, then reads one
 * byte per READ command.  Returns 0 on success, -EIO on poll timeout.
 * NOTE(review): intermediate register-access return codes (ret) are not
 * checked here.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* program the 16-bit OTP address across the two address regs */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		/* select READ and kick off the command */
		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
805
/* Program @length bytes from @data into the OTP memory starting at
 * @offset.  Powers the OTP block up if needed, selects BYTE program mode,
 * and writes one byte per PRGVRFY command.  Returns 0 on success, -EIO on
 * poll timeout.  NOTE(review): as in the raw read path, intermediate
 * register-access return codes (ret) are not checked.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* program the 16-bit OTP address across the two address regs */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
858
859 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
860                             u32 length, u8 *data)
861 {
862         u8 sig;
863         int ret;
864
865         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
866
867         if (ret == 0) {
868                 if (sig == OTP_INDICATOR_2)
869                         offset += 0x100;
870                 else if (sig != OTP_INDICATOR_1)
871                         ret = -EINVAL;
872                 if (!ret)
873                         ret = lan78xx_read_raw_otp(dev, offset, length, data);
874         }
875
876         return ret;
877 }
878
879 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
880 {
881         int i, ret;
882
883         for (i = 0; i < 100; i++) {
884                 u32 dp_sel;
885
886                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
887                 if (unlikely(ret < 0))
888                         return -EIO;
889
890                 if (dp_sel & DP_SEL_DPRDY_)
891                         return 0;
892
893                 usleep_range(40, 100);
894         }
895
896         netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
897
898         return -EIO;
899 }
900
901 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
902                                   u32 addr, u32 length, u32 *buf)
903 {
904         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
905         u32 dp_sel;
906         int i, ret;
907
908         if (usb_autopm_get_interface(dev->intf) < 0)
909                         return 0;
910
911         mutex_lock(&pdata->dataport_mutex);
912
913         ret = lan78xx_dataport_wait_not_busy(dev);
914         if (ret < 0)
915                 goto done;
916
917         ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
918
919         dp_sel &= ~DP_SEL_RSEL_MASK_;
920         dp_sel |= ram_select;
921         ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
922
923         for (i = 0; i < length; i++) {
924                 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
925
926                 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
927
928                 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
929
930                 ret = lan78xx_dataport_wait_not_busy(dev);
931                 if (ret < 0)
932                         goto done;
933         }
934
935 done:
936         mutex_unlock(&pdata->dataport_mutex);
937         usb_autopm_put_interface(dev->intf);
938
939         return ret;
940 }
941
942 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
943                                     int index, u8 addr[ETH_ALEN])
944 {
945         u32     temp;
946
947         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
948                 temp = addr[3];
949                 temp = addr[2] | (temp << 8);
950                 temp = addr[1] | (temp << 8);
951                 temp = addr[0] | (temp << 8);
952                 pdata->pfilter_table[index][1] = temp;
953                 temp = addr[5];
954                 temp = addr[4] | (temp << 8);
955                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
956                 pdata->pfilter_table[index][0] = temp;
957         }
958 }
959
960 /* returns hash bit number for given MAC address */
961 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
962 {
963         return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
964 }
965
/* Work-queue handler that pushes the shadow filter state to hardware.
 *
 * Runs in sleepable context (register access over USB cannot be done
 * under the rfe_ctl spinlock in lan78xx_set_multicast).  Writes the
 * multicast hash table through the dataport, reloads every perfect-
 * filter slot (slot 0, the device's own address, is skipped), and
 * finally commits the cached RFE_CTL value.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		/* invalidate the slot (MAF_HI first) before rewriting it */
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
990
991 static void lan78xx_set_multicast(struct net_device *netdev)
992 {
993         struct lan78xx_net *dev = netdev_priv(netdev);
994         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
995         unsigned long flags;
996         int i;
997
998         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
999
1000         pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1001                             RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1002
1003         for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1004                         pdata->mchash_table[i] = 0;
1005         /* pfilter_table[0] has own HW address */
1006         for (i = 1; i < NUM_OF_MAF; i++) {
1007                         pdata->pfilter_table[i][0] =
1008                         pdata->pfilter_table[i][1] = 0;
1009         }
1010
1011         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1012
1013         if (dev->net->flags & IFF_PROMISC) {
1014                 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1015                 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1016         } else {
1017                 if (dev->net->flags & IFF_ALLMULTI) {
1018                         netif_dbg(dev, drv, dev->net,
1019                                   "receive all multicast enabled");
1020                         pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1021                 }
1022         }
1023
1024         if (netdev_mc_count(dev->net)) {
1025                 struct netdev_hw_addr *ha;
1026                 int i;
1027
1028                 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1029
1030                 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1031
1032                 i = 1;
1033                 netdev_for_each_mc_addr(ha, netdev) {
1034                         /* set first 32 into Perfect Filter */
1035                         if (i < 33) {
1036                                 lan78xx_set_addr_filter(pdata, i, ha->addr);
1037                         } else {
1038                                 u32 bitnum = lan78xx_hash(ha->addr);
1039
1040                                 pdata->mchash_table[bitnum / 32] |=
1041                                                         (1 << (bitnum % 32));
1042                                 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1043                         }
1044                         i++;
1045                 }
1046         }
1047
1048         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1049
1050         /* defer register writes to a sleepable context */
1051         schedule_work(&pdata->set_multicast);
1052 }
1053
/* Configure MAC flow control after autonegotiation or a manual request.
 *
 * Resolves the pause capability either from the negotiated local/remote
 * advertisements (@lcladv/@rmtadv) or from the user's forced request,
 * programs the FCT_FLOW thresholds (speed-dependent magic values: 0x817
 * for SuperSpeed, 0x211 for high speed, 0 otherwise), then enables TX
 * and/or RX pause in the FLOW register.  The low 16 bits of FLOW carry
 * the TX pause time (0xFFFF = maximum).
 *
 * Always returns 0.  NOTE(review): @duplex is currently unused and
 * register write results in @ret are not checked.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
1088
/* Handle a PHY interrupt: detect link transitions and reprogram the MAC.
 *
 * On link down: reset the MAC, report link loss to phylib and stop the
 * statistics timer.  On link up: tune USB U1/U2 link power states per
 * the negotiated speed (SuperSpeed only), update flow control from the
 * advertisement registers, report link up, restart the stats timer and
 * kick the RX/TX tasklet.
 *
 * Returns 0/positive on success, negative errno on register access
 * failure.  Called from the EVENT_LINK_RESET deferred work path.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	int ladv, radv, ret;
	u32 buf;

	/* clear PHY interrupt status */
	ret = phy_read(phydev, LAN88XX_INT_STS);
	if (unlikely(ret < 0))
		return -EIO;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		phy_mac_interrupt(phydev, 0);

		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_gset(phydev, &ecmd);

		/* read-to-clear the PHY interrupt source */
		ret = phy_read(phydev, LAN88XX_INT_STS);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ethtool_cmd_speed(&ecmd) == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
		phy_mac_interrupt(phydev, 1);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		tasklet_schedule(&dev->bh);
	}

	return ret;
}
1175
1176 /* some work can't be done in tasklets, so we use keventd
1177  *
1178  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
1179  * but tasklet_schedule() doesn't.      hope the failure is rare.
1180  */
/* Flag event @work (an EVENT_* bit) and schedule the deferred-work
 * handler to process it in process context.  Logs if schedule_work
 * reports the work item was already pending (the bit is still set, so
 * the event is handled by the in-flight run -- see comment above).
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}
1187
1188 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1189 {
1190         u32 intdata;
1191
1192         if (urb->actual_length != 4) {
1193                 netdev_warn(dev->net,
1194                             "unexpected urb length %d", urb->actual_length);
1195                 return;
1196         }
1197
1198         memcpy(&intdata, urb->transfer_buffer, 4);
1199         le32_to_cpus(&intdata);
1200
1201         if (intdata & INT_ENP_PHY_INT) {
1202                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1203                           lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1204         } else
1205                 netdev_warn(dev->net,
1206                             "unexpected interrupt: 0x%08x\n", intdata);
1207 }
1208
/* ethtool get_eeprom_len: report the fixed maximum EEPROM size. */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1213
/* ethtool get_eeprom: dump raw EEPROM contents into @data.
 * Tags the buffer with the driver's EEPROM magic so a later set_eeprom
 * of the same image is accepted.
 */
static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	ee->magic = LAN78XX_EEPROM_MAGIC;

	return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
}
1223
1224 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1225                                       struct ethtool_eeprom *ee, u8 *data)
1226 {
1227         struct lan78xx_net *dev = netdev_priv(netdev);
1228
1229         /* Allow entire eeprom update only */
1230         if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
1231             (ee->offset == 0) &&
1232             (ee->len == 512) &&
1233             (data[0] == EEPROM_INDICATOR))
1234                 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1235         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1236                  (ee->offset == 0) &&
1237                  (ee->len == 512) &&
1238                  (data[0] == OTP_INDICATOR_1))
1239                 return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1240
1241         return -EINVAL;
1242 }
1243
/* ethtool get_strings: copy the statistics name table for ETH_SS_STATS. */
static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}
1250
1251 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1252 {
1253         if (sset == ETH_SS_STATS)
1254                 return ARRAY_SIZE(lan78xx_gstrings);
1255         else
1256                 return -EOPNOTSUPP;
1257 }
1258
/* ethtool get_ethtool_stats: refresh hardware counters, then copy the
 * current snapshot out under the stats lock so the reader never sees a
 * half-updated set.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1270
1271 static void lan78xx_get_wol(struct net_device *netdev,
1272                             struct ethtool_wolinfo *wol)
1273 {
1274         struct lan78xx_net *dev = netdev_priv(netdev);
1275         int ret;
1276         u32 buf;
1277         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1278
1279         if (usb_autopm_get_interface(dev->intf) < 0)
1280                         return;
1281
1282         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1283         if (unlikely(ret < 0)) {
1284                 wol->supported = 0;
1285                 wol->wolopts = 0;
1286         } else {
1287                 if (buf & USB_CFG_RMT_WKP_) {
1288                         wol->supported = WAKE_ALL;
1289                         wol->wolopts = pdata->wol;
1290                 } else {
1291                         wol->supported = 0;
1292                         wol->wolopts = 0;
1293                 }
1294         }
1295
1296         usb_autopm_put_interface(dev->intf);
1297 }
1298
1299 static int lan78xx_set_wol(struct net_device *netdev,
1300                            struct ethtool_wolinfo *wol)
1301 {
1302         struct lan78xx_net *dev = netdev_priv(netdev);
1303         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1304         int ret;
1305
1306         ret = usb_autopm_get_interface(dev->intf);
1307         if (ret < 0)
1308                 return ret;
1309
1310         if (wol->wolopts & ~WAKE_ALL)
1311                 return -EINVAL;
1312
1313         pdata->wol = wol->wolopts;
1314
1315         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1316
1317         phy_ethtool_set_wol(netdev->phydev, wol);
1318
1319         usb_autopm_put_interface(dev->intf);
1320
1321         return ret;
1322 }
1323
/* ethtool get_eee: combine PHY EEE state with the MAC's EEE enable.
 *
 * When MAC_CR_EEE_EN_ is set, EEE is reported enabled, active status is
 * derived from the advertisement overlap with the link partner, and the
 * TX LPI delay is read back from EEE_TX_LPI_REQ_DLY (same microsecond
 * unit as tx_lpi_timer).  Returns 0 or a negative errno.
 * NOTE(review): the MAC_CR read result in @ret is not checked before
 * @buf is tested.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1361
/* ethtool set_eee: enable or disable Energy-Efficient Ethernet.
 *
 * Enabling sets MAC_CR_EEE_EN_, pushes the advertisement to the PHY and
 * programs the TX LPI request delay; disabling just clears the MAC bit.
 * NOTE(review): always returns 0; intermediate register-access results
 * in @ret are discarded.
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		/* tx_lpi_timer is in the same uSec unit as the register */
		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
1391
/* ethtool get_link: refresh PHY status and return the current link state. */
static u32 lan78xx_get_link(struct net_device *net)
{
	phy_read_status(net->phydev);

	return net->phydev->link;
}
1398
/* ethtool nway_reset: restart autonegotiation on the attached PHY. */
static int lan78xx_nway_reset(struct net_device *net)
{
	return phy_start_aneg(net->phydev);
}
1403
/* ethtool get_drvinfo: driver name, version and USB bus path.
 * The strncpy copies are terminated in practice because DRIVER_NAME and
 * DRIVER_VERSION are shorter than the destination fields.
 */
static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
1413
/* ethtool get_msglevel: return the driver's message-enable bitmap. */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1420
/* ethtool set_msglevel: store the new message-enable bitmap. */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1427
/* Read the PHY's MDI-X control register.
 * The register lives in extended page space 1, so the page is switched
 * for the read and restored to page 0 afterwards.  Returns the raw
 * (unmasked) LAN88XX_EXT_MODE_CTRL value, or a phy_read error code.
 */
static int lan78xx_get_mdix_status(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int buf;

	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
	buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);

	return buf;
}
1439
1440 static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1441 {
1442         struct lan78xx_net *dev = netdev_priv(net);
1443         struct phy_device *phydev = net->phydev;
1444         int buf;
1445
1446         if (mdix_ctrl == ETH_TP_MDI) {
1447                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1448                           LAN88XX_EXT_PAGE_SPACE_1);
1449                 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1450                 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1451                 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1452                           buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1453                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1454                           LAN88XX_EXT_PAGE_SPACE_0);
1455         } else if (mdix_ctrl == ETH_TP_MDI_X) {
1456                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1457                           LAN88XX_EXT_PAGE_SPACE_1);
1458                 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1459                 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1460                 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1461                           buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1462                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1463                           LAN88XX_EXT_PAGE_SPACE_0);
1464         } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1465                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1466                           LAN88XX_EXT_PAGE_SPACE_1);
1467                 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1468                 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1469                 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1470                           buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1471                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1472                           LAN88XX_EXT_PAGE_SPACE_0);
1473         }
1474         dev->mdix_ctrl = mdix_ctrl;
1475 }
1476
/* ethtool get_settings: fetch link settings from the PHY and augment
 * them with the current MDI-X state read from the PHY's extended mode
 * control register.  Returns the phy_ethtool_gset() result.
 */
static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	int buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_gset(phydev, cmd);

	buf = lan78xx_get_mdix_status(net);

	/* isolate the MDI-X field and translate to ethtool constants */
	buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
	if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
		cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
		cmd->eth_tp_mdix = ETH_TP_MDI;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
		cmd->eth_tp_mdix = ETH_TP_MDI_X;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1508
/* ethtool set_settings: apply MDI-X control (if changed), then speed and
 * duplex via the PHY.  When autonegotiation is off, the link is bounced
 * by briefly setting BMCR_LOOPBACK so the partner sees the forced mode.
 * Returns the phy_ethtool_sset() result.
 */
static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
		lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
	}

	/* change speed & duplex */
	ret = phy_ethtool_sset(phydev, cmd);

	if (!cmd->autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1539
/* ethtool get_pauseparam: report the driver's requested flow-control
 * configuration.  Only sets the pause flags that are enabled; the
 * caller-provided struct supplies the defaults.
 * NOTE(review): the phy_ethtool_gset() result in @ecmd is not consulted
 * here -- presumably a leftover or a deliberate PHY-state refresh;
 * confirm before removing.
 */
static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };

	phy_ethtool_gset(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
}
1557
1558 static int lan78xx_set_pause(struct net_device *net,
1559                              struct ethtool_pauseparam *pause)
1560 {
1561         struct lan78xx_net *dev = netdev_priv(net);
1562         struct phy_device *phydev = net->phydev;
1563         struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
1564         int ret;
1565
1566         phy_ethtool_gset(phydev, &ecmd);
1567
1568         if (pause->autoneg && !ecmd.autoneg) {
1569                 ret = -EINVAL;
1570                 goto exit;
1571         }
1572
1573         dev->fc_request_control = 0;
1574         if (pause->rx_pause)
1575                 dev->fc_request_control |= FLOW_CTRL_RX;
1576
1577         if (pause->tx_pause)
1578                 dev->fc_request_control |= FLOW_CTRL_TX;
1579
1580         if (ecmd.autoneg) {
1581                 u32 mii_adv;
1582
1583                 ecmd.advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1584                 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1585                 ecmd.advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1586                 phy_ethtool_sset(phydev, &ecmd);
1587         }
1588
1589         dev->fc_autoneg = pause->autoneg;
1590
1591         ret = 0;
1592 exit:
1593         return ret;
1594 }
1595
/* ethtool operations table wiring the handlers above into the core. */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= lan78xx_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_settings	= lan78xx_get_settings,
	.set_settings	= lan78xx_set_settings,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
};
1617
/* ndo_do_ioctl: forward MII ioctls to the PHY layer; only valid while
 * the interface is running.
 */
static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;

	return phy_mii_ioctl(netdev->phydev, rq, cmd);
}
1625
1626 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1627 {
1628         u32 addr_lo, addr_hi;
1629         int ret;
1630         u8 addr[6];
1631
1632         ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1633         ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1634
1635         addr[0] = addr_lo & 0xFF;
1636         addr[1] = (addr_lo >> 8) & 0xFF;
1637         addr[2] = (addr_lo >> 16) & 0xFF;
1638         addr[3] = (addr_lo >> 24) & 0xFF;
1639         addr[4] = addr_hi & 0xFF;
1640         addr[5] = (addr_hi >> 8) & 0xFF;
1641
1642         if (!is_valid_ether_addr(addr)) {
1643                 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1644                         /* valid address present in Device Tree */
1645                         netif_dbg(dev, ifup, dev->net,
1646                                   "MAC address read from Device Tree");
1647                 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1648                                                  ETH_ALEN, addr) == 0) ||
1649                             (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1650                                               ETH_ALEN, addr) == 0)) &&
1651                            is_valid_ether_addr(addr)) {
1652                         /* eeprom values are valid so use them */
1653                         netif_dbg(dev, ifup, dev->net,
1654                                   "MAC address read from EEPROM");
1655                 } else {
1656                         /* generate random MAC */
1657                         random_ether_addr(addr);
1658                         netif_dbg(dev, ifup, dev->net,
1659                                   "MAC address set to random addr");
1660                 }
1661
1662                 addr_lo = addr[0] | (addr[1] << 8) |
1663                           (addr[2] << 16) | (addr[3] << 24);
1664                 addr_hi = addr[4] | (addr[5] << 8);
1665
1666                 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1667                 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1668         }
1669
1670         ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1671         ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1672
1673         ether_addr_copy(dev->net->dev_addr, addr);
1674 }
1675
1676 /* MDIO read and write wrappers for phylib */
/* phylib MDIO read: read register @idx of PHY @phy_id over the MII
 * management interface.
 *
 * Holds an autopm reference and dev->phy_mutex for the duration.  The
 * MII controller must be idle before the access is started and again
 * before the result is read from MII_DATA.  Returns the 16-bit register
 * value or a negative errno.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	/* success path: return the 16-bit register value in ret */
	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
1711
1712 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1713                                  u16 regval)
1714 {
1715         struct lan78xx_net *dev = bus->priv;
1716         u32 val, addr;
1717         int ret;
1718
1719         ret = usb_autopm_get_interface(dev->intf);
1720         if (ret < 0)
1721                 return ret;
1722
1723         mutex_lock(&dev->phy_mutex);
1724
1725         /* confirm MII not busy */
1726         ret = lan78xx_phy_wait_not_busy(dev);
1727         if (ret < 0)
1728                 goto done;
1729
1730         val = (u32)regval;
1731         ret = lan78xx_write_reg(dev, MII_DATA, val);
1732
1733         /* set the address, index & direction (write to PHY) */
1734         addr = mii_access(phy_id, idx, MII_WRITE);
1735         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1736
1737         ret = lan78xx_phy_wait_not_busy(dev);
1738         if (ret < 0)
1739                 goto done;
1740
1741 done:
1742         mutex_unlock(&dev->phy_mutex);
1743         usb_autopm_put_interface(dev->intf);
1744         return 0;
1745 }
1746
1747 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1748 {
1749         int ret;
1750
1751         dev->mdiobus = mdiobus_alloc();
1752         if (!dev->mdiobus) {
1753                 netdev_err(dev->net, "can't allocate MDIO bus\n");
1754                 return -ENOMEM;
1755         }
1756
1757         dev->mdiobus->priv = (void *)dev;
1758         dev->mdiobus->read = lan78xx_mdiobus_read;
1759         dev->mdiobus->write = lan78xx_mdiobus_write;
1760         dev->mdiobus->name = "lan78xx-mdiobus";
1761         dev->mdiobus->parent = &dev->udev->dev;
1762
1763         snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1764                  dev->udev->bus->busnum, dev->udev->devnum);
1765
1766         switch (dev->chipid) {
1767         case ID_REV_CHIP_ID_7800_:
1768         case ID_REV_CHIP_ID_7850_:
1769                 /* set to internal PHY id */
1770                 dev->mdiobus->phy_mask = ~(1 << 1);
1771                 break;
1772         }
1773
1774         ret = mdiobus_register(dev->mdiobus);
1775         if (ret) {
1776                 netdev_err(dev->net, "can't register MDIO bus\n");
1777                 goto exit1;
1778         }
1779
1780         netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1781         return 0;
1782 exit1:
1783         mdiobus_free(dev->mdiobus);
1784         return ret;
1785 }
1786
/* Undo lan78xx_mdio_init(): unregister the MDIO bus from the core, then
 * free the bus structure itself.
 */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1792
/* phylib link-change callback.
 *
 * Implements a hardware workaround only: when speed is forced to 100 Mb/s
 * (full or half duplex), briefly drop to 10 Mb/s and back so the chip
 * re-latches the correct mode.  PHY interrupts are masked around the
 * sequence and any interrupt raised by the intermediate transitions is
 * read back (and thereby cleared) before re-enabling them.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
1824
1825 static int lan78xx_phy_init(struct lan78xx_net *dev)
1826 {
1827         int ret;
1828         u32 mii_adv;
1829         struct phy_device *phydev = dev->net->phydev;
1830
1831         phydev = phy_find_first(dev->mdiobus);
1832         if (!phydev) {
1833                 netdev_err(dev->net, "no PHY found\n");
1834                 return -EIO;
1835         }
1836
1837         /* Enable PHY interrupts.
1838          * We handle our own interrupt
1839          */
1840         ret = phy_read(phydev, LAN88XX_INT_STS);
1841         ret = phy_write(phydev, LAN88XX_INT_MASK,
1842                         LAN88XX_INT_MASK_MDINTPIN_EN_ |
1843                         LAN88XX_INT_MASK_LINK_CHANGE_);
1844
1845         phydev->irq = PHY_IGNORE_INTERRUPT;
1846
1847         ret = phy_connect_direct(dev->net, phydev,
1848                                  lan78xx_link_status_change,
1849                                  PHY_INTERFACE_MODE_GMII);
1850         if (ret) {
1851                 netdev_err(dev->net, "can't attach PHY to %s\n",
1852                            dev->mdiobus->id);
1853                 return -EIO;
1854         }
1855
1856         /* set to AUTOMDIX */
1857         lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
1858
1859         /* MAC doesn't support 1000T Half */
1860         phydev->supported &= ~SUPPORTED_1000baseT_Half;
1861
1862         /* support both flow controls */
1863         dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
1864         phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1865         mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1866         phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1867
1868         genphy_config_aneg(phydev);
1869
1870         dev->fc_autoneg = phydev->autoneg;
1871
1872         phy_start(phydev);
1873
1874         netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
1875
1876         return 0;
1877 }
1878
1879 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
1880 {
1881         int ret = 0;
1882         u32 buf;
1883         bool rxenabled;
1884
1885         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1886
1887         rxenabled = ((buf & MAC_RX_RXEN_) != 0);
1888
1889         if (rxenabled) {
1890                 buf &= ~MAC_RX_RXEN_;
1891                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1892         }
1893
1894         /* add 4 to size for FCS */
1895         buf &= ~MAC_RX_MAX_SIZE_MASK_;
1896         buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1897
1898         ret = lan78xx_write_reg(dev, MAC_RX, buf);
1899
1900         if (rxenabled) {
1901                 buf |= MAC_RX_RXEN_;
1902                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1903         }
1904
1905         return 0;
1906 }
1907
/* Issue asynchronous unlinks for every URB on @q whose skb is not already
 * in unlink_start state.  Returns the number of successful
 * usb_unlink_urb() submissions.  The queue lock is dropped around each
 * unlink because usb_unlink_urb() races with completion handlers that
 * take the same lock; the URB is pinned with usb_get_urb() across the
 * unlocked window.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* restart the walk each pass: the list may have changed
		 * while the lock was dropped below
		 */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
1952
1953 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
1954 {
1955         struct lan78xx_net *dev = netdev_priv(netdev);
1956         int ll_mtu = new_mtu + netdev->hard_header_len;
1957         int old_hard_mtu = dev->hard_mtu;
1958         int old_rx_urb_size = dev->rx_urb_size;
1959         int ret;
1960
1961         if (new_mtu > MAX_SINGLE_PACKET_SIZE)
1962                 return -EINVAL;
1963
1964         if (new_mtu <= 0)
1965                 return -EINVAL;
1966         /* no second zero-length packet read wanted after mtu-sized packets */
1967         if ((ll_mtu % dev->maxpacket) == 0)
1968                 return -EDOM;
1969
1970         ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
1971
1972         netdev->mtu = new_mtu;
1973
1974         dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
1975         if (dev->rx_urb_size == old_hard_mtu) {
1976                 dev->rx_urb_size = dev->hard_mtu;
1977                 if (dev->rx_urb_size > old_rx_urb_size) {
1978                         if (netif_running(dev->net)) {
1979                                 unlink_urbs(dev, &dev->rxq);
1980                                 tasklet_schedule(&dev->bh);
1981                         }
1982                 }
1983         }
1984
1985         return 0;
1986 }
1987
1988 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
1989 {
1990         struct lan78xx_net *dev = netdev_priv(netdev);
1991         struct sockaddr *addr = p;
1992         u32 addr_lo, addr_hi;
1993         int ret;
1994
1995         if (netif_running(netdev))
1996                 return -EBUSY;
1997
1998         if (!is_valid_ether_addr(addr->sa_data))
1999                 return -EADDRNOTAVAIL;
2000
2001         ether_addr_copy(netdev->dev_addr, addr->sa_data);
2002
2003         addr_lo = netdev->dev_addr[0] |
2004                   netdev->dev_addr[1] << 8 |
2005                   netdev->dev_addr[2] << 16 |
2006                   netdev->dev_addr[3] << 24;
2007         addr_hi = netdev->dev_addr[4] |
2008                   netdev->dev_addr[5] << 8;
2009
2010         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2011         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2012
2013         /* Added to support MAC address changes */
2014         ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2015         ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2016
2017         return 0;
2018 }
2019
2020 /* Enable or disable Rx checksum offload engine */
2021 static int lan78xx_set_features(struct net_device *netdev,
2022                                 netdev_features_t features)
2023 {
2024         struct lan78xx_net *dev = netdev_priv(netdev);
2025         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2026         unsigned long flags;
2027         int ret;
2028
2029         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2030
2031         if (features & NETIF_F_RXCSUM) {
2032                 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2033                 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2034         } else {
2035                 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2036                 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2037         }
2038
2039         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2040                 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2041         else
2042                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2043
2044         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2045
2046         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2047
2048         return 0;
2049 }
2050
/* Work item: flush the software VLAN filter table to the chip.  The
 * dataport write sleeps (USB transfers), so it is deferred here from the
 * atomic ndo_vlan_rx_{add,kill}_vid callbacks.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
2060
2061 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2062                                    __be16 proto, u16 vid)
2063 {
2064         struct lan78xx_net *dev = netdev_priv(netdev);
2065         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2066         u16 vid_bit_index;
2067         u16 vid_dword_index;
2068
2069         vid_dword_index = (vid >> 5) & 0x7F;
2070         vid_bit_index = vid & 0x1F;
2071
2072         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2073
2074         /* defer register writes to a sleepable context */
2075         schedule_work(&pdata->set_vlan);
2076
2077         return 0;
2078 }
2079
2080 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2081                                     __be16 proto, u16 vid)
2082 {
2083         struct lan78xx_net *dev = netdev_priv(netdev);
2084         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2085         u16 vid_bit_index;
2086         u16 vid_dword_index;
2087
2088         vid_dword_index = (vid >> 5) & 0x7F;
2089         vid_bit_index = vid & 0x1F;
2090
2091         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2092
2093         /* defer register writes to a sleepable context */
2094         schedule_work(&pdata->set_vlan);
2095
2096         return 0;
2097 }
2098
2099 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2100 {
2101         int ret;
2102         u32 buf;
2103         u32 regs[6] = { 0 };
2104
2105         ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2106         if (buf & USB_CFG1_LTM_ENABLE_) {
2107                 u8 temp[2];
2108                 /* Get values from EEPROM first */
2109                 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2110                         if (temp[0] == 24) {
2111                                 ret = lan78xx_read_raw_eeprom(dev,
2112                                                               temp[1] * 2,
2113                                                               24,
2114                                                               (u8 *)regs);
2115                                 if (ret < 0)
2116                                         return;
2117                         }
2118                 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2119                         if (temp[0] == 24) {
2120                                 ret = lan78xx_read_raw_otp(dev,
2121                                                            temp[1] * 2,
2122                                                            24,
2123                                                            (u8 *)regs);
2124                                 if (ret < 0)
2125                                         return;
2126                         }
2127                 }
2128         }
2129
2130         lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2131         lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2132         lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2133         lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2134         lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2135         lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2136 }
2137
/* Perform a LiteReset of the chip and re-program its static configuration:
 * MAC address, USB burst/bulk-in tuning per negotiated speed, FIFO sizes,
 * interrupt/flow-control state, RX filtering, checksum offload, a PHY
 * reset, and finally the TX/RX datapath enables.
 *
 * Returns 0 on success, -EIO if either polling loop times out.
 * NOTE(review): most lan78xx_{read,write}_reg() return values are assigned
 * to ret but never checked; only the two 1-second polling loops can fail.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;

	/* trigger a LiteReset and poll (up to 1s) for it to self-clear */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	/* re-load the MAC address registers after the reset */
	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* size burst cap and URB queue depths to the negotiated USB speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* set HW_CFG_MEF_ and USB_CFG_BCE_ (see datasheet for semantics) */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear stale interrupts; zero both flow-control registers */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* poll (up to 1s) for PHY reset completion and device ready */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	/* enable automatic duplex/speed tracking in the MAC */
	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable PHY interrupts */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	buf |= INT_ENP_PHY_INT;
	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);

	/* enable the TX path: MAC transmitter, then TX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable the RX path: MAC receiver, then RX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2271
2272 static void lan78xx_init_stats(struct lan78xx_net *dev)
2273 {
2274         u32 *p;
2275         int i;
2276
2277         /* initialize for stats update
2278          * some counters are 20bits and some are 32bits
2279          */
2280         p = (u32 *)&dev->stats.rollover_max;
2281         for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2282                 p[i] = 0xFFFFF;
2283
2284         dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2285         dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2286         dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2287         dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2288         dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2289         dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2290         dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2291         dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2292         dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2293         dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2294
2295         lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
2296 }
2297
/* ndo_open: wake the device, reset the chip, initialize the PHY, submit
 * the interrupt URB used for link events, start stats collection and
 * queue a link reset.
 *
 * NOTE(review): the autopm reference taken here is released at "done:" on
 * success as well as on failure, while lan78xx_stop() also calls
 * usb_autopm_put_interface() — verify the get/put pairing across
 * open/stop before changing either.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	ret = lan78xx_reset(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto done;

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* force the deferred worker to treat the first link event as new */
	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2340
/* Unlink all in-flight TX and RX URBs and poll for the queues to drain.
 * dev->wait is published so completion paths can see a waiter exists.
 *
 * NOTE(review): the wait condition uses && — polling stops as soon as ANY
 * of rxq/txq/done is empty, not when all are.  Historical usbnet code
 * used the same expression; confirm intent before "fixing" it to ||.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
2366
/* ndo_stop: stop and disconnect the PHY, quiesce the queues, drain all
 * in-flight URBs and deferred work, then drop a runtime-PM reference.
 * Teardown order matters: PHY first, then the netif queue, then URBs,
 * and only then the workers/tasklet that those completions may schedule.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net		*dev = netdev_priv(net);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	phy_stop(net->phydev);
	phy_disconnect(net->phydev);
	net->phydev = NULL;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	/* NOTE(review): pairs with a get elsewhere — see lan78xx_open() */
	usb_autopm_put_interface(dev->intf);

	return 0;
}
2404
/* Prepend the 8-byte TX command header to @skb: TX_CMD_A is pushed last
 * so the final layout is TX_CMD_A then TX_CMD_B (both little-endian)
 * followed by the frame.  The skb is linearized first so the header and
 * payload are contiguous.
 *
 * Returns @skb with the header attached, or NULL (skb freed) if headroom
 * expansion or linearization fails; the caller accounts the drop.
 */
static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
				       struct sk_buff *skb, gfp_t flags)
{
	u32 tx_cmd_a, tx_cmd_b;

	/* make sure there is headroom for the 8-byte command header */
	if (skb_cow_head(skb, TX_OVERHEAD)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	/* request hardware IP/TCP checksum insertion when offloaded */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	tx_cmd_b = 0;
	if (skb_is_gso(skb)) {
		/* clamp MSS to the hardware minimum */
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;
	}

	/* ask hardware to insert the VLAN tag carried in the skb */
	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
	}

	skb_push(skb, 4);
	cpu_to_le32s(&tx_cmd_b);
	memcpy(skb->data, &tx_cmd_b, 4);

	skb_push(skb, 4);
	cpu_to_le32s(&tx_cmd_a);
	memcpy(skb->data, &tx_cmd_a, 4);

	return skb;
}
2449
/* Move @skb from @list to dev->done, tagging it with @state, and schedule
 * the bottom-half tasklet when the done queue transitions to non-empty.
 * Returns the skb's previous state.
 *
 * Locking: @list->lock is taken with irqsave, then released (interrupts
 * still disabled) while dev->done.lock is taken; the saved flags from the
 * first acquisition are restored when the second lock is dropped.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2472
/* Bulk-out URB completion handler.  Updates TX statistics, reacts to the
 * error class (halt recovery, shutdown, or queue stop), drops an async
 * runtime-PM reference (presumably taken at submit time — verify against
 * the submit path), and hands the skb to the bottom half via defer_bh().
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* stalled endpoint: recovery deferred to kevent */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			/* bus-level trouble: stop feeding the TX queue */
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2511
/* Append @newsk to @list and record @state in its cb area.  Uses the
 * unlocked __skb_queue_tail(), so the caller must hold @list's lock.
 */
static void lan78xx_queue_skb(struct sk_buff_head *list,
			      struct sk_buff *newsk, enum skb_state state)
{
	struct skb_data *entry = (struct skb_data *)newsk->cb;

	__skb_queue_tail(list, newsk);
	entry->state = state;
}
2520
2521 static netdev_tx_t
2522 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2523 {
2524         struct lan78xx_net *dev = netdev_priv(net);
2525         struct sk_buff *skb2 = NULL;
2526
2527         if (skb) {
2528                 skb_tx_timestamp(skb);
2529                 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2530         }
2531
2532         if (skb2) {
2533                 skb_queue_tail(&dev->txq_pend, skb2);
2534
2535                 /* throttle TX patch at slower than SUPER SPEED USB */
2536                 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2537                     (skb_queue_len(&dev->txq_pend) > 10))
2538                         netif_stop_queue(net);
2539         } else {
2540                 netif_dbg(dev, tx_err, dev->net,
2541                           "lan78xx_tx_prep return NULL\n");
2542                 dev->net->stats.tx_errors++;
2543                 dev->net->stats.tx_dropped++;
2544         }
2545
2546         tasklet_schedule(&dev->bh);
2547
2548         return NETDEV_TX_OK;
2549 }
2550
2551 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2552 {
2553         struct lan78xx_priv *pdata = NULL;
2554         int ret;
2555         int i;
2556
2557         dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2558
2559         pdata = (struct lan78xx_priv *)(dev->data[0]);
2560         if (!pdata) {
2561                 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2562                 return -ENOMEM;
2563         }
2564
2565         pdata->dev = dev;
2566
2567         spin_lock_init(&pdata->rfe_ctl_lock);
2568         mutex_init(&pdata->dataport_mutex);
2569
2570         INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2571
2572         for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2573                 pdata->vlan_table[i] = 0;
2574
2575         INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2576
2577         dev->net->features = 0;
2578
2579         if (DEFAULT_TX_CSUM_ENABLE)
2580                 dev->net->features |= NETIF_F_HW_CSUM;
2581
2582         if (DEFAULT_RX_CSUM_ENABLE)
2583                 dev->net->features |= NETIF_F_RXCSUM;
2584
2585         if (DEFAULT_TSO_CSUM_ENABLE)
2586                 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2587
2588         dev->net->hw_features = dev->net->features;
2589
2590         /* Init all registers */
2591         ret = lan78xx_reset(dev);
2592
2593         lan78xx_mdio_init(dev);
2594
2595         dev->net->flags |= IFF_MULTICAST;
2596
2597         pdata->wol = WAKE_MAGIC;
2598
2599         return 0;
2600 }
2601
2602 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2603 {
2604         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2605
2606         lan78xx_remove_mdio(dev);
2607
2608         if (pdata) {
2609                 netif_dbg(dev, ifdown, dev->net, "free pdata");
2610                 kfree(pdata);
2611                 pdata = NULL;
2612                 dev->data[0] = 0;
2613         }
2614 }
2615
2616 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2617                                     struct sk_buff *skb,
2618                                     u32 rx_cmd_a, u32 rx_cmd_b)
2619 {
2620         if (!(dev->net->features & NETIF_F_RXCSUM) ||
2621             unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2622                 skb->ip_summed = CHECKSUM_NONE;
2623         } else {
2624                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2625                 skb->ip_summed = CHECKSUM_COMPLETE;
2626         }
2627 }
2628
2629 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2630 {
2631         int             status;
2632
2633         if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2634                 skb_queue_tail(&dev->rxq_pause, skb);
2635                 return;
2636         }
2637
2638         dev->net->stats.rx_packets++;
2639         dev->net->stats.rx_bytes += skb->len;
2640
2641         skb->protocol = eth_type_trans(skb, dev->net);
2642
2643         netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2644                   skb->len + sizeof(struct ethhdr), skb->protocol);
2645         memset(skb->cb, 0, sizeof(struct skb_data));
2646
2647         if (skb_defer_rx_timestamp(skb))
2648                 return;
2649
2650         status = netif_rx(skb);
2651         if (status != NET_RX_SUCCESS)
2652                 netif_dbg(dev, rx_err, dev->net,
2653                           "netif_rx status %d\n", status);
2654 }
2655
2656 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2657 {
2658         if (skb->len < dev->net->hard_header_len)
2659                 return 0;
2660
2661         while (skb->len > 0) {
2662                 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2663                 u16 rx_cmd_c;
2664                 struct sk_buff *skb2;
2665                 unsigned char *packet;
2666
2667                 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2668                 le32_to_cpus(&rx_cmd_a);
2669                 skb_pull(skb, sizeof(rx_cmd_a));
2670
2671                 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2672                 le32_to_cpus(&rx_cmd_b);
2673                 skb_pull(skb, sizeof(rx_cmd_b));
2674
2675                 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2676                 le16_to_cpus(&rx_cmd_c);
2677                 skb_pull(skb, sizeof(rx_cmd_c));
2678
2679                 packet = skb->data;
2680
2681                 /* get the packet length */
2682                 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2683                 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2684
2685                 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2686                         netif_dbg(dev, rx_err, dev->net,
2687                                   "Error rx_cmd_a=0x%08x", rx_cmd_a);
2688                 } else {
2689                         /* last frame in this batch */
2690                         if (skb->len == size) {
2691                                 lan78xx_rx_csum_offload(dev, skb,
2692                                                         rx_cmd_a, rx_cmd_b);
2693
2694                                 skb_trim(skb, skb->len - 4); /* remove fcs */
2695                                 skb->truesize = size + sizeof(struct sk_buff);
2696
2697                                 return 1;
2698                         }
2699
2700                         skb2 = skb_clone(skb, GFP_ATOMIC);
2701                         if (unlikely(!skb2)) {
2702                                 netdev_warn(dev->net, "Error allocating skb");
2703                                 return 0;
2704                         }
2705
2706                         skb2->len = size;
2707                         skb2->data = packet;
2708                         skb_set_tail_pointer(skb2, size);
2709
2710                         lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2711
2712                         skb_trim(skb2, skb2->len - 4); /* remove fcs */
2713                         skb2->truesize = size + sizeof(struct sk_buff);
2714
2715                         lan78xx_skb_return(dev, skb2);
2716                 }
2717
2718                 skb_pull(skb, size);
2719
2720                 /* padding bytes before the next frame starts */
2721                 if (skb->len)
2722                         skb_pull(skb, align_count);
2723         }
2724
2725         return 1;
2726 }
2727
2728 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2729 {
2730         if (!lan78xx_rx(dev, skb)) {
2731                 dev->net->stats.rx_errors++;
2732                 goto done;
2733         }
2734
2735         if (skb->len) {
2736                 lan78xx_skb_return(dev, skb);
2737                 return;
2738         }
2739
2740         netif_dbg(dev, rx_err, dev->net, "drop\n");
2741         dev->net->stats.rx_errors++;
2742 done:
2743         skb_queue_tail(&dev->done, skb);
2744 }
2745
2746 static void rx_complete(struct urb *urb);
2747
/* Allocate a receive skb and submit @urb for one bulk-in transfer.
 *
 * @urb is consumed on every failure path (freed here); on success the
 * skb is queued on dev->rxq in rx_start state and rx_complete() takes
 * over.  Returns 0 or a negative errno (-ENOLINK when the interface is
 * down/unreachable, telling lan78xx_rx_bh() to stop refilling).
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	/* per-skb bookkeeping lives in skb->cb */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* submit only while the interface is up, not halted and awake */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			/* transient failure: retry on the next BH run */
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		/* not submitted: release both the skb and the URB */
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
2807
/* Completion handler for bulk-in URBs submitted by rx_submit().
 *
 * Classifies urb->status, hands the skb to the BH via defer_bh() and,
 * when the transfer did not indicate shutdown or a hard error, reuses
 * the URB for an immediate resubmission.  Setting urb to NULL below
 * transfers URB ownership to the BH (freed via entry->urb).
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* success, but reject runt transfers */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* let the BH free the URB; do not resubmit */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		/* low-level USB errors: give up this URB */
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			/* reuse this URB for the next receive */
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
2877
/* Bottom-half TX worker: coalesce skbs pending on dev->txq_pend into a
 * single bulk-out URB and submit it.
 *
 * Non-GSO frames (already prefixed with TX command words by
 * lan78xx_tx_prep()) are copied into one batch buffer, each packet
 * aligned to a 32-bit boundary, up to MAX_SINGLE_PACKET_SIZE.  A GSO
 * skb is always transmitted on its own.  Runs in tasklet context.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	count = 0;
	length = 0;
	/* size the batch: walk the pending queue without dequeuing yet */
	spin_lock_irqsave(&tqp->lock, flags);
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			/* a GSO skb goes out alone, uncopied */
			count = 1;
			length = skb->len - TX_OVERHEAD;
			__skb_unlink(skb, tqp);
			spin_unlock_irqrestore(&tqp->lock, flags);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* each packet starts on a 32-bit boundary in the batch */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}
	spin_unlock_irqrestore(&tqp->lock, flags);

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			/* length excludes the TX command header */
			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto drop;

	/* completion bookkeeping consumed by tx_complete() */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		netif_trans_update(dev->net);
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		/* throttle netdev queue while the HW queue is full */
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		/* stalled endpoint: clear it from process context */
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		/* skb is NULL when alloc_skb() above failed */
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
3005
3006 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3007 {
3008         struct urb *urb;
3009         int i;
3010
3011         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3012                 for (i = 0; i < 10; i++) {
3013                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3014                                 break;
3015                         urb = usb_alloc_urb(0, GFP_ATOMIC);
3016                         if (urb)
3017                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3018                                         return;
3019                 }
3020
3021                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3022                         tasklet_schedule(&dev->bh);
3023         }
3024         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3025                 netif_wake_queue(dev->net);
3026 }
3027
/* Main driver tasklet: drains the done queue (delivering completed RX
 * buffers, freeing finished TX/cleanup entries), then kicks TX
 * batching and RX refill while the interface is up.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			/* rx_process() delivers or requeues the skb */
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* unexpected state: stop processing this pass */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		/* refill RX unless a throttle delay or halt is pending */
		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
3071
/* Deferred work: handles events flagged from interrupt/completion
 * context that need process context (endpoint halt clearing, link
 * reset, statistics refresh).
 *
 * NOTE(review): the error paths use the historic usbnet idiom of a
 * goto jumping to a label *inside* the netif_msg_*() conditional, so a
 * failed autopm get logs through the same netdev_err() call.  Legal C,
 * but fragile — do not reorder these branches.
 */
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		/* clear a stalled bulk-out endpoint */
		unlink_urbs(dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt(dev->udev, dev->pipe_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
fail_pipe:
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		/* clear a stalled bulk-in endpoint */
		unlink_urbs(dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
				goto fail_halt;
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
fail_halt:
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		int ret = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (lan78xx_link_reset(dev) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			netdev_info(dev->net, "link reset failed (%d)\n",
				    ret);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}

	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
		lan78xx_update_stats(dev);

		clear_bit(EVENT_STAT_UPDATE, &dev->flags);

		/* back off the stats poll interval exponentially, cap 50 */
		mod_timer(&dev->stat_monitor,
			  jiffies + (STAT_UPDATE_TIMER * dev->delta));

		dev->delta = min((dev->delta * 2), 50);
	}
}
3149
3150 static void intr_complete(struct urb *urb)
3151 {
3152         struct lan78xx_net *dev = urb->context;
3153         int status = urb->status;
3154
3155         switch (status) {
3156         /* success */
3157         case 0:
3158                 lan78xx_status(dev, urb);
3159                 break;
3160
3161         /* software-driven interface shutdown */
3162         case -ENOENT:                   /* urb killed */
3163         case -ESHUTDOWN:                /* hardware gone */
3164                 netif_dbg(dev, ifdown, dev->net,
3165                           "intr shutdown, code %d\n", status);
3166                 return;
3167
3168         /* NOTE:  not throttling like RX/TX, since this endpoint
3169          * already polls infrequently
3170          */
3171         default:
3172                 netdev_dbg(dev->net, "intr status %d\n", status);
3173                 break;
3174         }
3175
3176         if (!netif_running(dev->net))
3177                 return;
3178
3179         memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3180         status = usb_submit_urb(urb, GFP_ATOMIC);
3181         if (status != 0)
3182                 netif_err(dev, timer, dev->net,
3183                           "intr resubmit --> %d\n", status);
3184 }
3185
/* USB disconnect callback: unregister the netdev and tear down driver
 * state in the reverse order of probe.
 *
 * NOTE(review): the interrupt URB is killed only after
 * lan78xx_unbind(); intr_complete() touches only dev/net state that is
 * still valid then, but confirm before reordering this sequence.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net		*dev;
	struct usb_device		*udev;
	struct net_device		*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);

	net = dev->net;
	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* release TX URBs parked while the device was autosuspended */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
3214
3215 static void lan78xx_tx_timeout(struct net_device *net)
3216 {
3217         struct lan78xx_net *dev = netdev_priv(net);
3218
3219         unlink_urbs(dev, &dev->txq);
3220         tasklet_schedule(&dev->bh);
3221 }
3222
3223 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3224                                                 struct net_device *netdev,
3225                                                 netdev_features_t features)
3226 {
3227         if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3228                 features &= ~NETIF_F_GSO_MASK;
3229
3230         features = vlan_features_check(skb, features);
3231         features = vxlan_features_check(skb, features);
3232
3233         return features;
3234 }
3235
/* net_device callback table; see the individual handlers above. */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
3251
3252 static void lan78xx_stat_monitor(unsigned long param)
3253 {
3254         struct lan78xx_net *dev;
3255
3256         dev = (struct lan78xx_net *)param;
3257
3258         lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3259 }
3260
3261 static int lan78xx_probe(struct usb_interface *intf,
3262                          const struct usb_device_id *id)
3263 {
3264         struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
3265         struct lan78xx_net *dev;
3266         struct net_device *netdev;
3267         struct usb_device *udev;
3268         int ret;
3269         unsigned maxp;
3270         unsigned period;
3271         u8 *buf = NULL;
3272
3273         udev = interface_to_usbdev(intf);
3274         udev = usb_get_dev(udev);
3275
3276         ret = -ENOMEM;
3277         netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3278         if (!netdev) {
3279                         dev_err(&intf->dev, "Error: OOM\n");
3280                         goto out1;
3281         }
3282
3283         /* netdev_printk() needs this */
3284         SET_NETDEV_DEV(netdev, &intf->dev);
3285
3286         dev = netdev_priv(netdev);
3287         dev->udev = udev;
3288         dev->intf = intf;
3289         dev->net = netdev;
3290         dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3291                                         | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3292
3293         skb_queue_head_init(&dev->rxq);
3294         skb_queue_head_init(&dev->txq);
3295         skb_queue_head_init(&dev->done);
3296         skb_queue_head_init(&dev->rxq_pause);
3297         skb_queue_head_init(&dev->txq_pend);
3298         mutex_init(&dev->phy_mutex);
3299
3300         tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3301         INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3302         init_usb_anchor(&dev->deferred);
3303
3304         netdev->netdev_ops = &lan78xx_netdev_ops;
3305         netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3306         netdev->ethtool_ops = &lan78xx_ethtool_ops;
3307
3308         dev->stat_monitor.function = lan78xx_stat_monitor;
3309         dev->stat_monitor.data = (unsigned long)dev;
3310         dev->delta = 1;
3311         init_timer(&dev->stat_monitor);
3312
3313         mutex_init(&dev->stats.access_lock);
3314
3315         if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
3316                 ret = -ENODEV;
3317                 goto out2;
3318         }
3319
3320         dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3321         ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
3322         if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
3323                 ret = -ENODEV;
3324                 goto out2;
3325         }
3326
3327         dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3328         ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
3329         if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
3330                 ret = -ENODEV;
3331                 goto out2;
3332         }
3333
3334         ep_intr = &intf->cur_altsetting->endpoint[2];
3335         if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
3336                 ret = -ENODEV;
3337                 goto out2;
3338         }
3339
3340         dev->pipe_intr = usb_rcvintpipe(dev->udev,
3341                                         usb_endpoint_num(&ep_intr->desc));
3342
3343         ret = lan78xx_bind(dev, intf);
3344         if (ret < 0)
3345                 goto out2;
3346         strcpy(netdev->name, "eth%d");
3347
3348         if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3349                 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3350         netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
3351
3352         period = ep_intr->desc.bInterval;
3353         maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3354         buf = kmalloc(maxp, GFP_KERNEL);
3355         if (buf) {
3356                 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3357                 if (!dev->urb_intr) {
3358                         ret = -ENOMEM;
3359                         kfree(buf);
3360                         goto out3;
3361                 } else {
3362                         usb_fill_int_urb(dev->urb_intr, dev->udev,
3363                                          dev->pipe_intr, buf, maxp,
3364                                          intr_complete, dev, period);
3365                         dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
3366                 }
3367         }
3368
3369         dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3370
3371         /* Reject broken descriptors. */
3372         if (dev->maxpacket == 0) {
3373                 ret = -ENODEV;
3374                 goto out3;
3375         }
3376
3377         /* driver requires remote-wakeup capability during autosuspend. */
3378         intf->needs_remote_wakeup = 1;
3379
3380         ret = register_netdev(netdev);
3381         if (ret != 0) {
3382                 netif_err(dev, probe, netdev, "couldn't register the device\n");
3383                 goto out2;
3384         }
3385
3386         usb_set_intfdata(intf, dev);
3387
3388         ret = device_set_wakeup_enable(&udev->dev, true);
3389
3390          /* Default delay of 2sec has more overhead than advantage.
3391           * Set to 10sec as default.
3392           */
3393         pm_runtime_set_autosuspend_delay(&udev->dev,
3394                                          DEFAULT_AUTOSUSPEND_DELAY);
3395
3396         return 0;
3397
3398 out3:
3399         lan78xx_unbind(dev, intf);
3400 out2:
3401         free_netdev(netdev);
3402 out1:
3403         usb_put_dev(udev);
3404
3405         return ret;
3406 }
3407
3408 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3409 {
3410         const u16 crc16poly = 0x8005;
3411         int i;
3412         u16 bit, crc, msb;
3413         u8 data;
3414
3415         crc = 0xFFFF;
3416         for (i = 0; i < len; i++) {
3417                 data = *buf++;
3418                 for (bit = 0; bit < 8; bit++) {
3419                         msb = crc >> 15;
3420                         crc <<= 1;
3421
3422                         if (msb ^ (u16)(data & 1)) {
3423                                 crc ^= crc16poly;
3424                                 crc |= (u16)0x0001U;
3425                         }
3426                         data >>= 1;
3427                 }
3428         }
3429
3430         return crc;
3431 }
3432
/* Program the chip's wake-on-LAN machinery for a system suspend.
 *
 * For each WAKE_* flag in @wol this arms the matching hardware wake
 * source: PHY energy/link events, magic packet, broadcast, perfect-DA
 * unicast, and pattern-match (WUF) filters for multicast and ARP frames.
 * TX/RX are stopped while the filters are programmed, stale wake status
 * is cleared, and RX is re-enabled at the end so the MAC can observe
 * wake frames while suspended.
 *
 * NOTE(review): the return codes of every lan78xx_read_reg()/
 * lan78xx_write_reg() call are assigned to @ret but never checked; a
 * failed register access is silently ignored and the function always
 * returns 0.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* leading bytes of the IPv4/IPv6 multicast MAC prefixes and the
	 * ARP ethertype (0x0806) used as wakeup-filter match patterns
	 */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* quiesce the MAC: disable transmit and receive paths */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear wake control/status and any latched wake-source bits */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable all wakeup-frame filters before re-arming the ones
	 * requested below
	 */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		/* magic packet wake uses the deeper suspend mode 3 */
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x7: match the first 3 bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3: match the first 2 bytes (33:33 prefix) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3000: match bytes 12-13 (the ethertype field) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable RX so the MAC can see wake frames while suspended;
	 * TX stays disabled until resume
	 */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3575
/* USB suspend callback (both system sleep and runtime autosuspend).
 *
 * On the first suspend (suspend_count 0 -> 1): refuses autosuspend while
 * TX work is pending, otherwise marks the device asleep, disables the
 * MAC TX/RX paths, kills all in-flight URBs, and detaches/reattaches the
 * netdev. Then, for autosuspend, arms "good frame" wakeup (suspend mode
 * 3) so any receivable frame resumes the device; for system sleep it
 * defers to lan78xx_set_suspend() with the user-configured WoL mask.
 *
 * Returns 0 on success or -EBUSY to veto an autosuspend with TX pending.
 *
 * NOTE(review): as in lan78xx_set_suspend(), the register accessors'
 * return values are stored in @ret but never checked.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	event = message.event;

	/* only quiesce the hardware on the first (outermost) suspend */
	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
			PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			/* mark asleep while still holding the txq lock so
			 * the TX path sees a consistent state
			 */
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* statistics polling must not run while suspended */
		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			/* clear wake control/status and latched sources */
			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* clear any stale wakeup status (WUPS is W1C) */
			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* keep RX running so incoming frames can wake us */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			/* system sleep: honor the configured WoL options */
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
3671
/* USB resume callback (counterpart of lan78xx_suspend()).
 *
 * Restarts the statistics timer, and on the last (outermost) resume
 * restarts the interrupt URB, resubmits any TX URBs that were deferred
 * while asleep, clears EVENT_DEV_ASLEEP, and kicks the queue/tasklet.
 * Finally clears and re-arms the wake status registers and re-enables
 * the MAC transmitter. Always returns 0.
 *
 * NOTE(review): the usb_submit_urb() result for the interrupt URB and
 * the register accessors' return values are ignored here.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!timer_pending(&dev->stat_monitor)) {
		dev->delta = 1;
		mod_timer(&dev->stat_monitor,
			  jiffies + STAT_UPDATE_TIMER);
	}

	/* only restart traffic when the outermost suspend unwinds */
	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
				usb_submit_urb(dev->urb_intr, GFP_NOIO);

		/* resubmit TX URBs that were parked on the deferred anchor
		 * while the device was asleep
		 */
		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				/* drop the skb and release the PM reference
				 * taken when the URB was deferred
				 */
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* disarm wake sources now that we are awake */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	/* write-1-to-clear the latched wake-reason bits */
	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the transmitter (disabled on suspend) */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
3738
/* USB reset_resume callback: the device may have lost power or been
 * reset while suspended, so redo the full chip reset and PHY init
 * before running the normal resume path.
 *
 * Returns 0 on success or a negative errno if the hardware could not
 * be reinitialized.
 */
static int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	int ret;

	/* Fix: the reset result was previously ignored, so an
	 * unresponsive device would "resume" with an uninitialized MAC.
	 */
	ret = lan78xx_reset(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		return ret;

	return lan78xx_resume(intf);
}
3749
/* USB vendor/product IDs this driver binds to; exported to userspace
 * via MODULE_DEVICE_TABLE for hotplug/module autoloading.
 */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{},	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, products);
3762
/* USB driver ops: probe/disconnect plus full power-management support
 * (runtime autosuspend enabled; LPM disabled while the hub initiates it).
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};
3774
/* standard module registration boilerplate */
module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");