GNU Linux-libre 4.9.283-gnu1
[releases.git] / drivers / net / usb / lan78xx.c
1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <net/ip6_checksum.h>
33 #include <net/vxlan.h>
34 #include <linux/microchipphy.h>
35 #include <linux/of_net.h>
36 #include "lan78xx.h"
37
38 #define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
39 #define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
40 #define DRIVER_NAME     "lan78xx"
41 #define DRIVER_VERSION  "1.0.4"
42
43 #define TX_TIMEOUT_JIFFIES              (5 * HZ)
44 #define THROTTLE_JIFFIES                (HZ / 8)
45 #define UNLINK_TIMEOUT_MS               3
46
47 #define RX_MAX_QUEUE_MEMORY             (60 * 1518)
48
49 #define SS_USB_PKT_SIZE                 (1024)
50 #define HS_USB_PKT_SIZE                 (512)
51 #define FS_USB_PKT_SIZE                 (64)
52
53 #define MAX_RX_FIFO_SIZE                (12 * 1024)
54 #define MAX_TX_FIFO_SIZE                (12 * 1024)
55 #define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
56 #define DEFAULT_BULK_IN_DELAY           (0x0800)
57 #define MAX_SINGLE_PACKET_SIZE          (9000)
58 #define DEFAULT_TX_CSUM_ENABLE          (true)
59 #define DEFAULT_RX_CSUM_ENABLE          (true)
60 #define DEFAULT_TSO_CSUM_ENABLE         (true)
61 #define DEFAULT_VLAN_FILTER_ENABLE      (true)
62 #define TX_OVERHEAD                     (8)
63 #define RXW_PADDING                     2
64
65 #define LAN78XX_USB_VENDOR_ID           (0x0424)
66 #define LAN7800_USB_PRODUCT_ID          (0x7800)
67 #define LAN7850_USB_PRODUCT_ID          (0x7850)
68 #define LAN78XX_EEPROM_MAGIC            (0x78A5)
69 #define LAN78XX_OTP_MAGIC               (0x78F3)
70
71 #define MII_READ                        1
72 #define MII_WRITE                       0
73
74 #define EEPROM_INDICATOR                (0xA5)
75 #define EEPROM_MAC_OFFSET               (0x01)
76 #define MAX_EEPROM_SIZE                 512
77 #define OTP_INDICATOR_1                 (0xF3)
78 #define OTP_INDICATOR_2                 (0xF7)
79
80 #define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
81                                          WAKE_MCAST | WAKE_BCAST | \
82                                          WAKE_ARP | WAKE_MAGIC)
83
84 /* USB related defines */
85 #define BULK_IN_PIPE                    1
86 #define BULK_OUT_PIPE                   2
87
88 /* default autosuspend delay (mSec)*/
89 #define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
90
91 /* statistic update interval (mSec) */
92 #define STAT_UPDATE_TIMER               (1 * 1000)
93
/* ethtool statistics name table (ETH_SS_STATS).
 *
 * NOTE(review): the ordering appears to mirror the field order of
 * struct lan78xx_statstage / lan78xx_statstage64 one-to-one — confirm
 * against the driver's get_ethtool_stats implementation before
 * reordering either side.  "Rx Fragment Errors" has inconsistent
 * capitalization, but these strings are user-visible ABI, so they are
 * left untouched.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
143
/* Raw 32-bit hardware statistics counters.
 *
 * lan78xx_read_stats() fills this struct directly from the device over
 * the control pipe and then walks it as a flat array of little-endian
 * u32 words, and lan78xx_update_stats() indexes it in lockstep with
 * struct lan78xx_statstage64.  The field order and count must therefore
 * stay exactly in sync with lan78xx_statstage64 (and the device's stats
 * response layout); do not insert, remove, or reorder members.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
193
/* 64-bit accumulated statistics (rollover-extended counters).
 *
 * lan78xx_update_stats() treats this struct as a flat u64 array that is
 * indexed in lockstep with the u32 fields of struct lan78xx_statstage,
 * so the member order and count must mirror that struct exactly.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
243
244 struct lan78xx_net;
245
/* Driver-private state for receive filtering, VLAN and wake-on-LAN. */
struct lan78xx_priv {
	struct lan78xx_net *dev;	/* back-pointer to owning device */
	u32 rfe_ctl;			/* cached RFE_CTL register value */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN]; /* VLAN membership table */
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast; /* deferred multicast filter update */
	struct work_struct set_vlan;	  /* deferred VLAN table update */
	u32 wol;			  /* wake-on-LAN option flags */
};
258
/* Lifecycle of an skb/URB pair; stored in skb_data.state (skb->cb). */
enum skb_state {
	illegal = 0,	/* not yet assigned a valid state */
	tx_start,	/* queued for transmit */
	tx_done,	/* transmit URB completed */
	rx_start,	/* receive URB submitted */
	rx_done,	/* receive URB completed */
	rx_cleanup,	/* receive skb awaiting teardown */
	unlink_start	/* URB unlink in progress */
};
268
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB backing this skb */
	struct lan78xx_net *dev;
	enum skb_state state;	/* current position in the tx/rx lifecycle */
	size_t length;		/* payload length for accounting */
	int num_of_packet;	/* packets batched in this transfer */
};
276
/* Context attached to asynchronous control requests. */
struct usb_context {
	struct usb_ctrlrequest req;	/* setup packet for the request */
	struct lan78xx_net *dev;
};
281
282 #define EVENT_TX_HALT                   0
283 #define EVENT_RX_HALT                   1
284 #define EVENT_RX_MEMORY                 2
285 #define EVENT_STS_SPLIT                 3
286 #define EVENT_LINK_RESET                4
287 #define EVENT_RX_PAUSED                 5
288 #define EVENT_DEV_WAKING                6
289 #define EVENT_DEV_ASLEEP                7
290 #define EVENT_DEV_OPEN                  8
291 #define EVENT_STAT_UPDATE               9
292
/* Statistics bookkeeping: the last raw snapshot, per-counter rollover
 * counts and maxima, and the accumulated 64-bit totals.  All members
 * are read/written under access_lock (see lan78xx_update_stats()).
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;		/* previous raw snapshot */
	struct lan78xx_statstage	rollover_count;	/* wraps seen per counter */
	struct lan78xx_statstage	rollover_max;	/* max value per counter */
	struct lan78xx_statstage64	curr_stat;	/* accumulated totals */
};
300
/* Per-device driver state. */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;	/* presumably struct lan78xx_priv — confirm at probe */

	int			rx_qlen;	/* rx queue depth */
	int			tx_qlen;	/* tx queue depth */
	struct sk_buff_head	rxq;		/* in-flight receive skbs */
	struct sk_buff_head	txq;		/* in-flight transmit skbs */
	struct sk_buff_head	done;		/* completed skbs awaiting bh */
	struct sk_buff_head	rxq_pause;	/* rx held while paused */
	struct sk_buff_head	txq_pend;	/* tx waiting for a free URB */

	struct tasklet_struct	bh;		/* completion bottom half */
	struct delayed_work	wq;		/* deferred event work */

	int			msg_enable;	/* netif message level bitmap */

	struct urb		*urb_intr;	/* interrupt-endpoint URB */
	struct usb_anchor	deferred;

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;		/* presumably EVENT_* bits — confirm */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;
	struct timer_list	stat_monitor;	/* periodic stats refresh */

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;		/* e.g. ID_REV_CHIP_ID_7800_ */
	u32			chiprev;
	struct mii_bus		*mdiobus;

	int			fc_autoneg;	/* flow control from autoneg? */
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;		/* guarded by stats.access_lock */
};
353
354 /* use ethtool to change the level for any given device */
355 static int msg_level = -1;
356 module_param(msg_level, int, 0);
357 MODULE_PARM_DESC(msg_level, "Override default message level");
358
359 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
360 {
361         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
362         int ret;
363
364         if (!buf)
365                 return -ENOMEM;
366
367         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
368                               USB_VENDOR_REQUEST_READ_REGISTER,
369                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
370                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
371         if (likely(ret >= 0)) {
372                 le32_to_cpus(buf);
373                 *data = *buf;
374         } else {
375                 netdev_warn(dev->net,
376                             "Failed to read register index 0x%08x. ret = %d",
377                             index, ret);
378         }
379
380         kfree(buf);
381
382         return ret;
383 }
384
385 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
386 {
387         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
388         int ret;
389
390         if (!buf)
391                 return -ENOMEM;
392
393         *buf = data;
394         cpu_to_le32s(buf);
395
396         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
397                               USB_VENDOR_REQUEST_WRITE_REGISTER,
398                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
399                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
400         if (unlikely(ret < 0)) {
401                 netdev_warn(dev->net,
402                             "Failed to write register index 0x%08x. ret = %d",
403                             index, ret);
404         }
405
406         kfree(buf);
407
408         return ret;
409 }
410
411 static int lan78xx_read_stats(struct lan78xx_net *dev,
412                               struct lan78xx_statstage *data)
413 {
414         int ret = 0;
415         int i;
416         struct lan78xx_statstage *stats;
417         u32 *src;
418         u32 *dst;
419
420         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
421         if (!stats)
422                 return -ENOMEM;
423
424         ret = usb_control_msg(dev->udev,
425                               usb_rcvctrlpipe(dev->udev, 0),
426                               USB_VENDOR_REQUEST_GET_STATS,
427                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
428                               0,
429                               0,
430                               (void *)stats,
431                               sizeof(*stats),
432                               USB_CTRL_SET_TIMEOUT);
433         if (likely(ret >= 0)) {
434                 src = (u32 *)stats;
435                 dst = (u32 *)data;
436                 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
437                         le32_to_cpus(&src[i]);
438                         dst[i] = src[i];
439                 }
440         } else {
441                 netdev_warn(dev->net,
442                             "Failed to read stat ret = %d", ret);
443         }
444
445         kfree(stats);
446
447         return ret;
448 }
449
/* If the fresh value of @member is below the previously saved snapshot,
 * the 32-bit hardware counter wrapped; bump its rollover count.
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement and is safe in unbraced if/else bodies (the old bare
 * `{ ... }` form would not compile there).
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
454
/* Compare a fresh counter snapshot against the previously saved one,
 * bump the rollover count for every counter that wrapped, then save the
 * snapshot for the next pass.  Called with dev->stats.access_lock held
 * by lan78xx_update_stats().
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	/* remember this snapshot as the baseline for the next comparison */
	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
508
509 static void lan78xx_update_stats(struct lan78xx_net *dev)
510 {
511         u32 *p, *count, *max;
512         u64 *data;
513         int i;
514         struct lan78xx_statstage lan78xx_stats;
515
516         if (usb_autopm_get_interface(dev->intf) < 0)
517                 return;
518
519         p = (u32 *)&lan78xx_stats;
520         count = (u32 *)&dev->stats.rollover_count;
521         max = (u32 *)&dev->stats.rollover_max;
522         data = (u64 *)&dev->stats.curr_stat;
523
524         mutex_lock(&dev->stats.access_lock);
525
526         if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
527                 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
528
529         for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
530                 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
531
532         mutex_unlock(&dev->stats.access_lock);
533
534         usb_autopm_put_interface(dev->intf);
535 }
536
537 /* Loop until the read is completed with timeout called with phy_mutex held */
538 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
539 {
540         unsigned long start_time = jiffies;
541         u32 val;
542         int ret;
543
544         do {
545                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
546                 if (unlikely(ret < 0))
547                         return -EIO;
548
549                 if (!(val & MII_ACC_MII_BUSY_))
550                         return 0;
551         } while (!time_after(jiffies, start_time + HZ));
552
553         return -EIO;
554 }
555
556 static inline u32 mii_access(int id, int index, int read)
557 {
558         u32 ret;
559
560         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
561         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
562         if (read)
563                 ret |= MII_ACC_MII_READ_;
564         else
565                 ret |= MII_ACC_MII_WRITE_;
566         ret |= MII_ACC_MII_BUSY_;
567
568         return ret;
569 }
570
571 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
572 {
573         unsigned long start_time = jiffies;
574         u32 val;
575         int ret;
576
577         do {
578                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
579                 if (unlikely(ret < 0))
580                         return -EIO;
581
582                 if (!(val & E2P_CMD_EPC_BUSY_) ||
583                     (val & E2P_CMD_EPC_TIMEOUT_))
584                         break;
585                 usleep_range(40, 100);
586         } while (!time_after(jiffies, start_time + HZ));
587
588         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
589                 netdev_warn(dev->net, "EEPROM read operation timeout");
590                 return -EIO;
591         }
592
593         return 0;
594 }
595
596 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
597 {
598         unsigned long start_time = jiffies;
599         u32 val;
600         int ret;
601
602         do {
603                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
604                 if (unlikely(ret < 0))
605                         return -EIO;
606
607                 if (!(val & E2P_CMD_EPC_BUSY_))
608                         return 0;
609
610                 usleep_range(40, 100);
611         } while (!time_after(jiffies, start_time + HZ));
612
613         netdev_warn(dev->net, "EEPROM is busy");
614         return -EIO;
615 }
616
617 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
618                                    u32 length, u8 *data)
619 {
620         u32 val;
621         u32 saved;
622         int i, ret;
623         int retval;
624
625         /* depends on chip, some EEPROM pins are muxed with LED function.
626          * disable & restore LED function to access EEPROM.
627          */
628         ret = lan78xx_read_reg(dev, HW_CFG, &val);
629         saved = val;
630         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
631                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
632                 ret = lan78xx_write_reg(dev, HW_CFG, val);
633         }
634
635         retval = lan78xx_eeprom_confirm_not_busy(dev);
636         if (retval)
637                 return retval;
638
639         for (i = 0; i < length; i++) {
640                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
641                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
642                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
643                 if (unlikely(ret < 0)) {
644                         retval = -EIO;
645                         goto exit;
646                 }
647
648                 retval = lan78xx_wait_eeprom(dev);
649                 if (retval < 0)
650                         goto exit;
651
652                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
653                 if (unlikely(ret < 0)) {
654                         retval = -EIO;
655                         goto exit;
656                 }
657
658                 data[i] = val & 0xFF;
659                 offset++;
660         }
661
662         retval = 0;
663 exit:
664         if (dev->chipid == ID_REV_CHIP_ID_7800_)
665                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
666
667         return retval;
668 }
669
670 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
671                                u32 length, u8 *data)
672 {
673         u8 sig;
674         int ret;
675
676         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
677         if ((ret == 0) && (sig == EEPROM_INDICATOR))
678                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
679         else
680                 ret = -EINVAL;
681
682         return ret;
683 }
684
/* Write @length bytes from @data to the EEPROM starting at @offset.
 *
 * On LAN7800 the EEPROM pins are muxed with the LED function, so LEDs
 * are disabled in HW_CFG during the access and restored via the exit
 * path.  The command sequence is: EWEN (write enable), then for each
 * byte fill E2P_DATA and issue a WRITE command, waiting for completion
 * after every command.  Returns 0 on success or a negative errno.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* restore the LED configuration saved at entry */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
751
/* Read @length bytes from the on-chip OTP memory starting at @offset.
 *
 * Powers the OTP block up if it is in power-down (waiting, with a ~1s
 * cap, for the transition), then reads one byte per READ/GO command,
 * polling OTP_STATUS for completion each time.  Returns 0 on success
 * or -EIO on a poll timeout.  NOTE(review): individual register
 * read/write results (ret) are not checked here — presumably tolerated
 * because the status polls bound the damage; confirm before relying on
 * error reporting from this path.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* address is split across two registers: high bits then low */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
805
/* Program @length bytes of @data into the OTP memory at @offset.
 *
 * Mirrors lan78xx_read_raw_otp(): power the OTP block up if needed,
 * switch to BYTE program mode, then program-and-verify each byte,
 * polling OTP_STATUS for completion.  Returns 0 on success or -EIO on
 * a poll timeout.  NOTE(review): OTP is one-time programmable and
 * per-register errors are unchecked — handle with care.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* address is split across two registers: high bits then low */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
858
859 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
860                             u32 length, u8 *data)
861 {
862         u8 sig;
863         int ret;
864
865         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
866
867         if (ret == 0) {
868                 if (sig == OTP_INDICATOR_1)
869                         offset = offset;
870                 else if (sig == OTP_INDICATOR_2)
871                         offset += 0x100;
872                 else
873                         ret = -EINVAL;
874                 if (!ret)
875                         ret = lan78xx_read_raw_otp(dev, offset, length, data);
876         }
877
878         return ret;
879 }
880
881 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
882 {
883         int i, ret;
884
885         for (i = 0; i < 100; i++) {
886                 u32 dp_sel;
887
888                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
889                 if (unlikely(ret < 0))
890                         return -EIO;
891
892                 if (dp_sel & DP_SEL_DPRDY_)
893                         return 0;
894
895                 usleep_range(40, 100);
896         }
897
898         netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
899
900         return -EIO;
901 }
902
/* Write 'length' 32-bit words from 'buf' into the internal RAM selected
 * by 'ram_select', starting at dataport address 'addr'.
 *
 * Serialized against other dataport users via pdata->dataport_mutex and
 * wrapped in a USB autopm reference so the device is awake for the
 * register accesses.
 *
 * Returns 0 on success or a negative errno from the busy-wait helper.
 * NOTE(review): an autopm failure returns 0 (success) without doing any
 * work -- confirm callers tolerate this silent no-op.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
                                  u32 addr, u32 length, u32 *buf)
{
        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
        u32 dp_sel;
        int i, ret;

        if (usb_autopm_get_interface(dev->intf) < 0)
                        return 0;

        mutex_lock(&pdata->dataport_mutex);

        ret = lan78xx_dataport_wait_not_busy(dev);
        if (ret < 0)
                goto done;

        /* select the target RAM, preserving the other DP_SEL bits */
        ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

        dp_sel &= ~DP_SEL_RSEL_MASK_;
        dp_sel |= ram_select;
        ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

        /* one address/data/command cycle per word, waiting for the
         * dataport to go idle before the next word
         */
        for (i = 0; i < length; i++) {
                ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

                ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

                ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

                ret = lan78xx_dataport_wait_not_busy(dev);
                if (ret < 0)
                        goto done;
        }

done:
        mutex_unlock(&pdata->dataport_mutex);
        usb_autopm_put_interface(dev->intf);

        return ret;
}
943
944 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
945                                     int index, u8 addr[ETH_ALEN])
946 {
947         u32     temp;
948
949         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
950                 temp = addr[3];
951                 temp = addr[2] | (temp << 8);
952                 temp = addr[1] | (temp << 8);
953                 temp = addr[0] | (temp << 8);
954                 pdata->pfilter_table[index][1] = temp;
955                 temp = addr[5];
956                 temp = addr[4] | (temp << 8);
957                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
958                 pdata->pfilter_table[index][0] = temp;
959         }
960 }
961
962 /* returns hash bit number for given MAC address */
963 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
964 {
965         return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
966 }
967
/* Work item: push the multicast filter state assembled by
 * lan78xx_set_multicast() to the chip from a sleepable context --
 * the multicast hash table via the dataport, the perfect-filter
 * (MAF) registers, and finally the receive filter control register.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
        struct lan78xx_priv *pdata =
                        container_of(param, struct lan78xx_priv, set_multicast);
        struct lan78xx_net *dev = pdata->dev;
        int i;
        int ret;

        netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
                  pdata->rfe_ctl);

        lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
                               DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

        for (i = 1; i < NUM_OF_MAF; i++) {
                /* clear MAF_HI first so the entry is never marked valid
                 * while only half of the address has been written
                 */
                ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
                ret = lan78xx_write_reg(dev, MAF_LO(i),
                                        pdata->pfilter_table[i][1]);
                ret = lan78xx_write_reg(dev, MAF_HI(i),
                                        pdata->pfilter_table[i][0]);
        }

        /* enable the new filter configuration last */
        ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
992
993 static void lan78xx_set_multicast(struct net_device *netdev)
994 {
995         struct lan78xx_net *dev = netdev_priv(netdev);
996         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
997         unsigned long flags;
998         int i;
999
1000         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1001
1002         pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1003                             RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1004
1005         for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1006                         pdata->mchash_table[i] = 0;
1007         /* pfilter_table[0] has own HW address */
1008         for (i = 1; i < NUM_OF_MAF; i++) {
1009                         pdata->pfilter_table[i][0] =
1010                         pdata->pfilter_table[i][1] = 0;
1011         }
1012
1013         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1014
1015         if (dev->net->flags & IFF_PROMISC) {
1016                 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1017                 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1018         } else {
1019                 if (dev->net->flags & IFF_ALLMULTI) {
1020                         netif_dbg(dev, drv, dev->net,
1021                                   "receive all multicast enabled");
1022                         pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1023                 }
1024         }
1025
1026         if (netdev_mc_count(dev->net)) {
1027                 struct netdev_hw_addr *ha;
1028                 int i;
1029
1030                 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1031
1032                 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1033
1034                 i = 1;
1035                 netdev_for_each_mc_addr(ha, netdev) {
1036                         /* set first 32 into Perfect Filter */
1037                         if (i < 33) {
1038                                 lan78xx_set_addr_filter(pdata, i, ha->addr);
1039                         } else {
1040                                 u32 bitnum = lan78xx_hash(ha->addr);
1041
1042                                 pdata->mchash_table[bitnum / 32] |=
1043                                                         (1 << (bitnum % 32));
1044                                 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1045                         }
1046                         i++;
1047                 }
1048         }
1049
1050         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1051
1052         /* defer register writes to a sleepable context */
1053         schedule_work(&pdata->set_multicast);
1054 }
1055
/* Program MAC flow control from the resolved pause capabilities.
 *
 * @duplex:  negotiated duplex (currently not consulted here).
 * @lcladv:  local MII advertisement word (used when fc_autoneg is set).
 * @rmtadv:  link partner's MII advertisement word.
 *
 * When flow-control autoneg is enabled the capability is resolved from
 * the advertisements; otherwise the user-requested setting is used.
 * Always returns 0.
 *
 * NOTE(review): 0x817 (SuperSpeed) and 0x211 (HighSpeed) are written to
 * FCT_FLOW -- presumably FIFO on/off pause thresholds; confirm against
 * the LAN78xx datasheet. Full-speed gets 0 (flow thresholds disabled).
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
                                      u16 lcladv, u16 rmtadv)
{
        u32 flow = 0, fct_flow = 0;
        int ret;
        u8 cap;

        if (dev->fc_autoneg)
                cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
        else
                cap = dev->fc_request_control;

        /* low 16 bits of FLOW carry the pause time (max: 0xFFFF) */
        if (cap & FLOW_CTRL_TX)
                flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

        if (cap & FLOW_CTRL_RX)
                flow |= FLOW_CR_RX_FCEN_;

        if (dev->udev->speed == USB_SPEED_SUPER)
                fct_flow = 0x817;
        else if (dev->udev->speed == USB_SPEED_HIGH)
                fct_flow = 0x211;

        netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
                  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
                  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

        ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

        /* threshold value should be set before enabling flow */
        ret = lan78xx_write_reg(dev, FLOW, flow);

        return 0;
}
1090
/* Handle a PHY interrupt event: acknowledge the interrupt at both the
 * PHY and the LAN78xx, then propagate any link-state change.
 *
 * Link down: reset the MAC, tell phylib the link is gone, and stop the
 * statistics timer.
 * Link up: tune USB U1/U2 low-power states (SuperSpeed only), program
 * flow control from the negotiated advertisements, tell phylib the link
 * is up, (re)arm the statistics timer and kick the driver bottom half.
 *
 * Returns a negative errno on register/PHY access failure; otherwise
 * the value of the last intermediate operation.
 * NOTE(review): several intermediate 'ret' assignments are unchecked,
 * and on the no-change path the earlier phy_read() result is returned.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
        struct phy_device *phydev = dev->net->phydev;
        struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
        int ladv, radv, ret;
        u32 buf;

        /* clear PHY interrupt status */
        ret = phy_read(phydev, LAN88XX_INT_STS);
        if (unlikely(ret < 0))
                return -EIO;

        /* clear LAN78xx interrupt status */
        ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
        if (unlikely(ret < 0))
                return -EIO;

        /* refresh phydev->link from the hardware */
        phy_read_status(phydev);

        if (!phydev->link && dev->link_on) {
                dev->link_on = false;

                /* reset MAC */
                ret = lan78xx_read_reg(dev, MAC_CR, &buf);
                if (unlikely(ret < 0))
                        return -EIO;
                buf |= MAC_CR_RST_;
                ret = lan78xx_write_reg(dev, MAC_CR, buf);
                if (unlikely(ret < 0))
                        return -EIO;

                phy_mac_interrupt(phydev, 0);

                /* no link: stop polling hardware statistics */
                del_timer(&dev->stat_monitor);
        } else if (phydev->link && !dev->link_on) {
                dev->link_on = true;

                phy_ethtool_gset(phydev, &ecmd);

                ret = phy_read(phydev, LAN88XX_INT_STS);

                if (dev->udev->speed == USB_SPEED_SUPER) {
                        if (ethtool_cmd_speed(&ecmd) == 1000) {
                                /* disable U2 */
                                ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
                                buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
                                ret = lan78xx_write_reg(dev, USB_CFG1, buf);
                                /* enable U1 */
                                ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
                                buf |= USB_CFG1_DEV_U1_INIT_EN_;
                                ret = lan78xx_write_reg(dev, USB_CFG1, buf);
                        } else {
                                /* enable U1 & U2 */
                                ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
                                buf |= USB_CFG1_DEV_U2_INIT_EN_;
                                buf |= USB_CFG1_DEV_U1_INIT_EN_;
                                ret = lan78xx_write_reg(dev, USB_CFG1, buf);
                        }
                }

                ladv = phy_read(phydev, MII_ADVERTISE);
                if (ladv < 0)
                        return ladv;

                radv = phy_read(phydev, MII_LPA);
                if (radv < 0)
                        return radv;

                netif_dbg(dev, link, dev->net,
                          "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
                          ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);

                ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
                phy_mac_interrupt(phydev, 1);

                if (!timer_pending(&dev->stat_monitor)) {
                        dev->delta = 1;
                        mod_timer(&dev->stat_monitor,
                                  jiffies + STAT_UPDATE_TIMER);
                }

                tasklet_schedule(&dev->bh);
        }

        return ret;
}
1177
1178 /* some work can't be done in tasklets, so we use keventd
1179  *
1180  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
1181  * but tasklet_schedule() doesn't.      hope the failure is rare.
1182  */
1183 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1184 {
1185         set_bit(work, &dev->flags);
1186         if (!schedule_delayed_work(&dev->wq, 0))
1187                 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1188 }
1189
1190 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1191 {
1192         u32 intdata;
1193
1194         if (urb->actual_length != 4) {
1195                 netdev_warn(dev->net,
1196                             "unexpected urb length %d", urb->actual_length);
1197                 return;
1198         }
1199
1200         memcpy(&intdata, urb->transfer_buffer, 4);
1201         le32_to_cpus(&intdata);
1202
1203         if (intdata & INT_ENP_PHY_INT) {
1204                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1205                           lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1206         } else
1207                 netdev_warn(dev->net,
1208                             "unexpected interrupt: 0x%08x\n", intdata);
1209 }
1210
/* ethtool get_eeprom_len: EEPROM size (in bytes) reported to userspace. */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
        return MAX_EEPROM_SIZE;
}
1215
/* ethtool get_eeprom: read ee->len bytes of raw EEPROM content starting
 * at ee->offset into 'data'. The magic is set so userspace can match the
 * dump to this device. Returns the raw-read result (0 or negative errno).
 */
static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
                                      struct ethtool_eeprom *ee, u8 *data)
{
        struct lan78xx_net *dev = netdev_priv(netdev);

        ee->magic = LAN78XX_EEPROM_MAGIC;

        return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
}
1225
1226 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1227                                       struct ethtool_eeprom *ee, u8 *data)
1228 {
1229         struct lan78xx_net *dev = netdev_priv(netdev);
1230
1231         /* Allow entire eeprom update only */
1232         if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
1233             (ee->offset == 0) &&
1234             (ee->len == 512) &&
1235             (data[0] == EEPROM_INDICATOR))
1236                 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1237         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1238                  (ee->offset == 0) &&
1239                  (ee->len == 512) &&
1240                  (data[0] == OTP_INDICATOR_1))
1241                 return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1242
1243         return -EINVAL;
1244 }
1245
1246 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1247                                 u8 *data)
1248 {
1249         if (stringset == ETH_SS_STATS)
1250                 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1251 }
1252
1253 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1254 {
1255         if (sset == ETH_SS_STATS)
1256                 return ARRAY_SIZE(lan78xx_gstrings);
1257         else
1258                 return -EOPNOTSUPP;
1259 }
1260
/* ethtool get_ethtool_stats: refresh the hardware counters, then copy a
 * consistent snapshot of the cached statistics under the stats lock.
 */
static void lan78xx_get_stats(struct net_device *netdev,
                              struct ethtool_stats *stats, u64 *data)
{
        struct lan78xx_net *dev = netdev_priv(netdev);

        lan78xx_update_stats(dev);

        mutex_lock(&dev->stats.access_lock);
        memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
        mutex_unlock(&dev->stats.access_lock);
}
1272
1273 static void lan78xx_get_wol(struct net_device *netdev,
1274                             struct ethtool_wolinfo *wol)
1275 {
1276         struct lan78xx_net *dev = netdev_priv(netdev);
1277         int ret;
1278         u32 buf;
1279         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1280
1281         if (usb_autopm_get_interface(dev->intf) < 0)
1282                         return;
1283
1284         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1285         if (unlikely(ret < 0)) {
1286                 wol->supported = 0;
1287                 wol->wolopts = 0;
1288         } else {
1289                 if (buf & USB_CFG_RMT_WKP_) {
1290                         wol->supported = WAKE_ALL;
1291                         wol->wolopts = pdata->wol;
1292                 } else {
1293                         wol->supported = 0;
1294                         wol->wolopts = 0;
1295                 }
1296         }
1297
1298         usb_autopm_put_interface(dev->intf);
1299 }
1300
1301 static int lan78xx_set_wol(struct net_device *netdev,
1302                            struct ethtool_wolinfo *wol)
1303 {
1304         struct lan78xx_net *dev = netdev_priv(netdev);
1305         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1306         int ret;
1307
1308         ret = usb_autopm_get_interface(dev->intf);
1309         if (ret < 0)
1310                 return ret;
1311
1312         if (wol->wolopts & ~WAKE_ALL)
1313                 return -EINVAL;
1314
1315         pdata->wol = wol->wolopts;
1316
1317         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1318
1319         phy_ethtool_set_wol(netdev->phydev, wol);
1320
1321         usb_autopm_put_interface(dev->intf);
1322
1323         return ret;
1324 }
1325
/* ethtool get_eee: report Energy Efficient Ethernet state, combining
 * the PHY's EEE advertisement data with the MAC's EEE enable bit
 * (MAC_CR) and LPI request delay register.
 * Returns 0 on success or a negative errno.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
        struct lan78xx_net *dev = netdev_priv(net);
        struct phy_device *phydev = net->phydev;
        int ret;
        u32 buf;

        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;

        /* fills advertised/lp_advertised from the PHY */
        ret = phy_ethtool_get_eee(phydev, edata);
        if (ret < 0)
                goto exit;

        ret = lan78xx_read_reg(dev, MAC_CR, &buf);
        if (buf & MAC_CR_EEE_EN_) {
                edata->eee_enabled = true;
                /* EEE is active only if both sides advertise a common mode */
                edata->eee_active = !!(edata->advertised &
                                       edata->lp_advertised);
                edata->tx_lpi_enabled = true;
                /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
                ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
                edata->tx_lpi_timer = buf;
        } else {
                edata->eee_enabled = false;
                edata->eee_active = false;
                edata->tx_lpi_enabled = false;
                edata->tx_lpi_timer = 0;
        }

        ret = 0;
exit:
        usb_autopm_put_interface(dev->intf);

        return ret;
}
1363
/* ethtool set_eee: enable or disable Energy Efficient Ethernet in both
 * the MAC (MAC_CR_EEE_EN_) and the PHY; when enabling, also program the
 * TX LPI request delay from edata->tx_lpi_timer.
 * Returns 0 once the autopm reference is taken, or the autopm error.
 * NOTE(review): register access results in 'ret' are not checked.
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
        struct lan78xx_net *dev = netdev_priv(net);
        int ret;
        u32 buf;

        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;

        if (edata->eee_enabled) {
                ret = lan78xx_read_reg(dev, MAC_CR, &buf);
                buf |= MAC_CR_EEE_EN_;
                ret = lan78xx_write_reg(dev, MAC_CR, buf);

                phy_ethtool_set_eee(net->phydev, edata);

                /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer share the same unit */
                buf = (u32)edata->tx_lpi_timer;
                ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
        } else {
                ret = lan78xx_read_reg(dev, MAC_CR, &buf);
                buf &= ~MAC_CR_EEE_EN_;
                ret = lan78xx_write_reg(dev, MAC_CR, buf);
        }

        usb_autopm_put_interface(dev->intf);

        return 0;
}
1393
1394 static u32 lan78xx_get_link(struct net_device *net)
1395 {
1396         phy_read_status(net->phydev);
1397
1398         return net->phydev->link;
1399 }
1400
/* ethtool nway_reset: restart autonegotiation on the attached PHY. */
static int lan78xx_nway_reset(struct net_device *net)
{
        return phy_start_aneg(net->phydev);
}
1405
1406 static void lan78xx_get_drvinfo(struct net_device *net,
1407                                 struct ethtool_drvinfo *info)
1408 {
1409         struct lan78xx_net *dev = netdev_priv(net);
1410
1411         strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1412         strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1413         usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1414 }
1415
/* ethtool get_msglevel: current netif message-level bitmask. */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
        struct lan78xx_net *dev = netdev_priv(net);

        return dev->msg_enable;
}
1422
/* ethtool set_msglevel: set the netif message-level bitmask. */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
        struct lan78xx_net *dev = netdev_priv(net);

        dev->msg_enable = level;
}
1429
/* Read the raw MDI-X mode control register from the PHY's extended
 * register space 1, restoring the default page (space 0) afterwards.
 * Returns the register value, or a negative phy_read() error code.
 */
static int lan78xx_get_mdix_status(struct net_device *net)
{
        struct phy_device *phydev = net->phydev;
        int buf;

        phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
        buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
        phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);

        return buf;
}
1441
1442 static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1443 {
1444         struct lan78xx_net *dev = netdev_priv(net);
1445         struct phy_device *phydev = net->phydev;
1446         int buf;
1447
1448         if (mdix_ctrl == ETH_TP_MDI) {
1449                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1450                           LAN88XX_EXT_PAGE_SPACE_1);
1451                 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1452                 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1453                 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1454                           buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1455                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1456                           LAN88XX_EXT_PAGE_SPACE_0);
1457         } else if (mdix_ctrl == ETH_TP_MDI_X) {
1458                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1459                           LAN88XX_EXT_PAGE_SPACE_1);
1460                 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1461                 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1462                 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1463                           buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1464                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1465                           LAN88XX_EXT_PAGE_SPACE_0);
1466         } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1467                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1468                           LAN88XX_EXT_PAGE_SPACE_1);
1469                 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1470                 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1471                 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1472                           buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1473                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1474                           LAN88XX_EXT_PAGE_SPACE_0);
1475         }
1476         dev->mdix_ctrl = mdix_ctrl;
1477 }
1478
/* ethtool get_settings: fetch link settings from the PHY, then augment
 * the result with the MDI-X state read from the PHY's extended register
 * space. Returns the phy_ethtool_gset() result (or the autopm error).
 */
static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
        struct lan78xx_net *dev = netdev_priv(net);
        struct phy_device *phydev = net->phydev;
        int ret;
        int buf;

        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;

        ret = phy_ethtool_gset(phydev, cmd);

        buf = lan78xx_get_mdix_status(net);

        /* translate the raw MDI-X field into ethtool constants;
         * unrecognized values leave cmd->eth_tp_mdix untouched
         */
        buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
        if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
                cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
                cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
        } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
                cmd->eth_tp_mdix = ETH_TP_MDI;
                cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
        } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
                cmd->eth_tp_mdix = ETH_TP_MDI_X;
                cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
        }

        usb_autopm_put_interface(dev->intf);

        return ret;
}
1510
/* ethtool set_settings: update MDI-X control if it changed, then apply
 * speed/duplex/autoneg via the PHY. When autonegotiation is disabled,
 * the link is bounced so the forced parameters take effect.
 * Returns the phy_ethtool_sset() result (or the autopm error).
 */
static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
        struct lan78xx_net *dev = netdev_priv(net);
        struct phy_device *phydev = net->phydev;
        int ret = 0;
        int temp;

        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;

        if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
                lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
        }

        /* change speed & duplex */
        ret = phy_ethtool_sset(phydev, cmd);

        if (!cmd->autoneg) {
                /* force link down */
                /* NOTE(review): briefly sets BMCR_LOOPBACK then restores
                 * BMCR -- presumably to drop the link so the forced
                 * settings are renegotiated; confirm against the PHY
                 * datasheet.
                 */
                temp = phy_read(phydev, MII_BMCR);
                phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
                mdelay(1);
                phy_write(phydev, MII_BMCR, temp);
        }

        usb_autopm_put_interface(dev->intf);

        return ret;
}
1541
1542 static void lan78xx_get_pause(struct net_device *net,
1543                               struct ethtool_pauseparam *pause)
1544 {
1545         struct lan78xx_net *dev = netdev_priv(net);
1546         struct phy_device *phydev = net->phydev;
1547         struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
1548
1549         phy_ethtool_gset(phydev, &ecmd);
1550
1551         pause->autoneg = dev->fc_autoneg;
1552
1553         if (dev->fc_request_control & FLOW_CTRL_TX)
1554                 pause->tx_pause = 1;
1555
1556         if (dev->fc_request_control & FLOW_CTRL_RX)
1557                 pause->rx_pause = 1;
1558 }
1559
1560 static int lan78xx_set_pause(struct net_device *net,
1561                              struct ethtool_pauseparam *pause)
1562 {
1563         struct lan78xx_net *dev = netdev_priv(net);
1564         struct phy_device *phydev = net->phydev;
1565         struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
1566         int ret;
1567
1568         phy_ethtool_gset(phydev, &ecmd);
1569
1570         if (pause->autoneg && !ecmd.autoneg) {
1571                 ret = -EINVAL;
1572                 goto exit;
1573         }
1574
1575         dev->fc_request_control = 0;
1576         if (pause->rx_pause)
1577                 dev->fc_request_control |= FLOW_CTRL_RX;
1578
1579         if (pause->tx_pause)
1580                 dev->fc_request_control |= FLOW_CTRL_TX;
1581
1582         if (ecmd.autoneg) {
1583                 u32 mii_adv;
1584
1585                 ecmd.advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1586                 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1587                 ecmd.advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1588                 phy_ethtool_sset(phydev, &ecmd);
1589         }
1590
1591         dev->fc_autoneg = pause->autoneg;
1592
1593         ret = 0;
1594 exit:
1595         return ret;
1596 }
1597
/* ethtool operations table wired into the net_device at registration. */
static const struct ethtool_ops lan78xx_ethtool_ops = {
        .get_link       = lan78xx_get_link,
        .nway_reset     = lan78xx_nway_reset,
        .get_drvinfo    = lan78xx_get_drvinfo,
        .get_msglevel   = lan78xx_get_msglevel,
        .set_msglevel   = lan78xx_set_msglevel,
        .get_settings   = lan78xx_get_settings,
        .set_settings   = lan78xx_set_settings,
        .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
        .get_eeprom     = lan78xx_ethtool_get_eeprom,
        .set_eeprom     = lan78xx_ethtool_set_eeprom,
        .get_ethtool_stats = lan78xx_get_stats,
        .get_sset_count = lan78xx_get_sset_count,
        .get_strings    = lan78xx_get_strings,
        .get_wol        = lan78xx_get_wol,
        .set_wol        = lan78xx_set_wol,
        .get_eee        = lan78xx_get_eee,
        .set_eee        = lan78xx_set_eee,
        .get_pauseparam = lan78xx_get_pause,
        .set_pauseparam = lan78xx_set_pause,
};
1619
1620 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1621 {
1622         if (!netif_running(netdev))
1623                 return -EINVAL;
1624
1625         return phy_mii_ioctl(netdev->phydev, rq, cmd);
1626 }
1627
/* Establish the device MAC address at probe time.
 *
 * Reads the current RX_ADDRL/RX_ADDRH registers; if they don't contain
 * a valid address, falls back in order to: platform/Device Tree MAC,
 * then EEPROM or OTP, then a random locally-administered address. Any
 * fallback address is written back to the RX address registers. In all
 * cases, perfect-filter slot 0 is programmed with the address and it is
 * copied to the net_device.
 */
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
        u32 addr_lo, addr_hi;
        int ret;
        u8 addr[6];

        ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
        ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

        /* registers pack the address little-endian: bytes 0-3 in ADDRL */
        addr[0] = addr_lo & 0xFF;
        addr[1] = (addr_lo >> 8) & 0xFF;
        addr[2] = (addr_lo >> 16) & 0xFF;
        addr[3] = (addr_lo >> 24) & 0xFF;
        addr[4] = addr_hi & 0xFF;
        addr[5] = (addr_hi >> 8) & 0xFF;

        if (!is_valid_ether_addr(addr)) {
                if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
                        /* valid address present in Device Tree */
                        netif_dbg(dev, ifup, dev->net,
                                  "MAC address read from Device Tree");
                } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
                                                 ETH_ALEN, addr) == 0) ||
                            (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
                                              ETH_ALEN, addr) == 0)) &&
                           is_valid_ether_addr(addr)) {
                        /* eeprom values are valid so use them */
                        netif_dbg(dev, ifup, dev->net,
                                  "MAC address read from EEPROM");
                } else {
                        /* generate random MAC */
                        random_ether_addr(addr);
                        netif_dbg(dev, ifup, dev->net,
                                  "MAC address set to random addr");
                }

                addr_lo = addr[0] | (addr[1] << 8) |
                          (addr[2] << 16) | (addr[3] << 24);
                addr_hi = addr[4] | (addr[5] << 8);

                ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
                ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
        }

        /* program perfect-filter slot 0 with our own address */
        ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
        ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

        ether_addr_copy(dev->net->dev_addr, addr);
}
1677
1678 /* MDIO read and write wrappers for phylib */
1679 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1680 {
1681         struct lan78xx_net *dev = bus->priv;
1682         u32 val, addr;
1683         int ret;
1684
1685         ret = usb_autopm_get_interface(dev->intf);
1686         if (ret < 0)
1687                 return ret;
1688
1689         mutex_lock(&dev->phy_mutex);
1690
1691         /* confirm MII not busy */
1692         ret = lan78xx_phy_wait_not_busy(dev);
1693         if (ret < 0)
1694                 goto done;
1695
1696         /* set the address, index & direction (read from PHY) */
1697         addr = mii_access(phy_id, idx, MII_READ);
1698         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1699
1700         ret = lan78xx_phy_wait_not_busy(dev);
1701         if (ret < 0)
1702                 goto done;
1703
1704         ret = lan78xx_read_reg(dev, MII_DATA, &val);
1705
1706         ret = (int)(val & 0xFFFF);
1707
1708 done:
1709         mutex_unlock(&dev->phy_mutex);
1710         usb_autopm_put_interface(dev->intf);
1711         return ret;
1712 }
1713
1714 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1715                                  u16 regval)
1716 {
1717         struct lan78xx_net *dev = bus->priv;
1718         u32 val, addr;
1719         int ret;
1720
1721         ret = usb_autopm_get_interface(dev->intf);
1722         if (ret < 0)
1723                 return ret;
1724
1725         mutex_lock(&dev->phy_mutex);
1726
1727         /* confirm MII not busy */
1728         ret = lan78xx_phy_wait_not_busy(dev);
1729         if (ret < 0)
1730                 goto done;
1731
1732         val = (u32)regval;
1733         ret = lan78xx_write_reg(dev, MII_DATA, val);
1734
1735         /* set the address, index & direction (write to PHY) */
1736         addr = mii_access(phy_id, idx, MII_WRITE);
1737         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1738
1739         ret = lan78xx_phy_wait_not_busy(dev);
1740         if (ret < 0)
1741                 goto done;
1742
1743 done:
1744         mutex_unlock(&dev->phy_mutex);
1745         usb_autopm_put_interface(dev->intf);
1746         return 0;
1747 }
1748
1749 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1750 {
1751         int ret;
1752
1753         dev->mdiobus = mdiobus_alloc();
1754         if (!dev->mdiobus) {
1755                 netdev_err(dev->net, "can't allocate MDIO bus\n");
1756                 return -ENOMEM;
1757         }
1758
1759         dev->mdiobus->priv = (void *)dev;
1760         dev->mdiobus->read = lan78xx_mdiobus_read;
1761         dev->mdiobus->write = lan78xx_mdiobus_write;
1762         dev->mdiobus->name = "lan78xx-mdiobus";
1763         dev->mdiobus->parent = &dev->udev->dev;
1764
1765         snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1766                  dev->udev->bus->busnum, dev->udev->devnum);
1767
1768         switch (dev->chipid) {
1769         case ID_REV_CHIP_ID_7800_:
1770         case ID_REV_CHIP_ID_7850_:
1771                 /* set to internal PHY id */
1772                 dev->mdiobus->phy_mask = ~(1 << 1);
1773                 break;
1774         }
1775
1776         ret = mdiobus_register(dev->mdiobus);
1777         if (ret) {
1778                 netdev_err(dev->net, "can't register MDIO bus\n");
1779                 goto exit1;
1780         }
1781
1782         netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1783         return 0;
1784 exit1:
1785         mdiobus_free(dev->mdiobus);
1786         return ret;
1787 }
1788
1789 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1790 {
1791         mdiobus_unregister(dev->mdiobus);
1792         mdiobus_free(dev->mdiobus);
1793 }
1794
/* phylib link-change callback (registered via phy_connect_direct()).
 *
 * Only acts in forced 100 Mb/s mode, where it applies a hardware
 * workaround; in all other modes it is a no-op.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt while we bounce the speed bits,
		 * so the intermediate transitions don't fire events
		 */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround
		 * (LAN88XX_INT_STS is read-to-clear)
		 */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
1826
1827 static int lan78xx_phy_init(struct lan78xx_net *dev)
1828 {
1829         int ret;
1830         u32 mii_adv;
1831         struct phy_device *phydev = dev->net->phydev;
1832
1833         phydev = phy_find_first(dev->mdiobus);
1834         if (!phydev) {
1835                 netdev_err(dev->net, "no PHY found\n");
1836                 return -EIO;
1837         }
1838
1839         /* Enable PHY interrupts.
1840          * We handle our own interrupt
1841          */
1842         ret = phy_read(phydev, LAN88XX_INT_STS);
1843         ret = phy_write(phydev, LAN88XX_INT_MASK,
1844                         LAN88XX_INT_MASK_MDINTPIN_EN_ |
1845                         LAN88XX_INT_MASK_LINK_CHANGE_);
1846
1847         phydev->irq = PHY_IGNORE_INTERRUPT;
1848
1849         ret = phy_connect_direct(dev->net, phydev,
1850                                  lan78xx_link_status_change,
1851                                  PHY_INTERFACE_MODE_GMII);
1852         if (ret) {
1853                 netdev_err(dev->net, "can't attach PHY to %s\n",
1854                            dev->mdiobus->id);
1855                 return -EIO;
1856         }
1857
1858         /* set to AUTOMDIX */
1859         lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
1860
1861         /* MAC doesn't support 1000T Half */
1862         phydev->supported &= ~SUPPORTED_1000baseT_Half;
1863
1864         /* support both flow controls */
1865         dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
1866         phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1867         mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1868         phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1869
1870         genphy_config_aneg(phydev);
1871
1872         dev->fc_autoneg = phydev->autoneg;
1873
1874         phy_start(phydev);
1875
1876         netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
1877
1878         return 0;
1879 }
1880
/* Program the MAC's maximum receive frame length into MAC_RX.
 *
 * 'size' is the Ethernet frame size without FCS; 4 bytes are added for
 * the FCS before writing. If the receiver is currently enabled it is
 * disabled around the update and re-enabled afterwards, since the
 * max-size field must not change while RX is running.
 *
 * NOTE(review): the results of the register accesses collected in 'ret'
 * are ignored and the function always returns 0.
 */
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
	int ret = 0;
	u32 buf;
	bool rxenabled;

	ret = lan78xx_read_reg(dev, MAC_RX, &buf);

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	/* quiesce the receiver before touching the max-size field */
	if (rxenabled) {
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* restore the receiver if it was running */
	if (rxenabled) {
		buf |= MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	return 0;
}
1909
/* Asynchronously unlink every URB still pending on queue 'q'.
 *
 * Each skb on the queue carries a struct skb_data in its cb with the URB
 * and its state. Entries already in unlink_start are skipped so a
 * concurrent caller cannot unlink the same URB twice. Returns the number
 * of URBs for which usb_unlink_urb() was successfully issued.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* restart the walk each iteration: the lock is dropped
		 * below, so the queue may have changed under us
		 */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
1954
/* ndo_change_mtu handler.
 *
 * Rejects MTUs above MAX_SINGLE_PACKET_SIZE or <= 0, and returns -EDOM
 * when the link-layer MTU is an exact multiple of the USB endpoint's
 * max packet size (a full final packet would require an unwanted
 * zero-length packet to terminate the bulk transfer). Otherwise
 * programs the new max frame length into the MAC, updates mtu/hard_mtu,
 * and — if the RX URB size tracked hard_mtu and grew — unlinks pending
 * RX URBs so they are resubmitted at the larger size.
 *
 * NOTE(review): the result of lan78xx_set_rx_max_frame_length() is
 * collected in 'ret' but ignored; the function always returns 0 past
 * the validation checks.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	if (new_mtu > MAX_SINGLE_PACKET_SIZE)
		return -EINVAL;

	if (new_mtu <= 0)
		return -EINVAL;
	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			if (netif_running(dev->net)) {
				/* force in-flight RX URBs to complete so
				 * the bh tasklet resubmits at the new size
				 */
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
1989
1990 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
1991 {
1992         struct lan78xx_net *dev = netdev_priv(netdev);
1993         struct sockaddr *addr = p;
1994         u32 addr_lo, addr_hi;
1995         int ret;
1996
1997         if (netif_running(netdev))
1998                 return -EBUSY;
1999
2000         if (!is_valid_ether_addr(addr->sa_data))
2001                 return -EADDRNOTAVAIL;
2002
2003         ether_addr_copy(netdev->dev_addr, addr->sa_data);
2004
2005         addr_lo = netdev->dev_addr[0] |
2006                   netdev->dev_addr[1] << 8 |
2007                   netdev->dev_addr[2] << 16 |
2008                   netdev->dev_addr[3] << 24;
2009         addr_hi = netdev->dev_addr[4] |
2010                   netdev->dev_addr[5] << 8;
2011
2012         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2013         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2014
2015         /* Added to support MAC address changes */
2016         ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2017         ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2018
2019         return 0;
2020 }
2021
2022 /* Enable or disable Rx checksum offload engine */
2023 static int lan78xx_set_features(struct net_device *netdev,
2024                                 netdev_features_t features)
2025 {
2026         struct lan78xx_net *dev = netdev_priv(netdev);
2027         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2028         unsigned long flags;
2029         int ret;
2030
2031         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2032
2033         if (features & NETIF_F_RXCSUM) {
2034                 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2035                 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2036         } else {
2037                 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2038                 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2039         }
2040
2041         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2042                 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2043         else
2044                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2045
2046         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2047
2048         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2049
2050         return 0;
2051 }
2052
/* Work item: push the cached VLAN filter table to the chip's dataport.
 * Register access sleeps (USB), so the vid add/kill ndo handlers defer
 * the write here instead of doing it in atomic context.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
2062
2063 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2064                                    __be16 proto, u16 vid)
2065 {
2066         struct lan78xx_net *dev = netdev_priv(netdev);
2067         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2068         u16 vid_bit_index;
2069         u16 vid_dword_index;
2070
2071         vid_dword_index = (vid >> 5) & 0x7F;
2072         vid_bit_index = vid & 0x1F;
2073
2074         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2075
2076         /* defer register writes to a sleepable context */
2077         schedule_work(&pdata->set_vlan);
2078
2079         return 0;
2080 }
2081
2082 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2083                                     __be16 proto, u16 vid)
2084 {
2085         struct lan78xx_net *dev = netdev_priv(netdev);
2086         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2087         u16 vid_bit_index;
2088         u16 vid_dword_index;
2089
2090         vid_dword_index = (vid >> 5) & 0x7F;
2091         vid_bit_index = vid & 0x1F;
2092
2093         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2094
2095         /* defer register writes to a sleepable context */
2096         schedule_work(&pdata->set_vlan);
2097
2098         return 0;
2099 }
2100
/* Initialize the USB LTM (Latency Tolerance Messaging) registers.
 *
 * If LTM is enabled in USB_CFG1, try to load the six LTM register values
 * from EEPROM first, falling back to OTP; descriptor at offset 0x3F must
 * report a 24-byte payload (temp[0] == 24) whose location is temp[1]*2.
 * If neither source provides values, the registers are zeroed. On a raw
 * read failure the function bails out without touching the registers.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
2139
/* Full device bring-up: soft-reset the chip and program MAC, USB, FIFO,
 * flow-control and receive-filter state, then reset and release the PHY
 * and finally enable the TX/RX paths.
 *
 * The register sequence is order-sensitive (reset completion must be
 * polled before anything else; TX/RX are enabled last). Returns 0 on
 * success, -EIO if the LiteReset or PHY reset does not complete within
 * roughly one second.
 *
 * NOTE(review): most intermediate register-access results stored in
 * 'ret' are ignored; only the two poll loops can fail this function.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;

	/* trigger a LiteReset and poll for its self-clearing bit */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	/* reset cleared the MAC address registers; reprogram them */
	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* size burst cap and queue depths to the negotiated USB speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* enable multiple ethernet frames per USB bulk transfer */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	/* enable burst cap */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear interrupts and disable flow control for now */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* poll until the PHY reset bit clears AND the device reports ready */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	/* let the MAC track the PHY's negotiated speed/duplex */
	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable PHY interrupts */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	buf |= INT_ENP_PHY_INT;
	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);

	/* enable MAC transmitter, then the TX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable MAC receiver, then the RX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2273
2274 static void lan78xx_init_stats(struct lan78xx_net *dev)
2275 {
2276         u32 *p;
2277         int i;
2278
2279         /* initialize for stats update
2280          * some counters are 20bits and some are 32bits
2281          */
2282         p = (u32 *)&dev->stats.rollover_max;
2283         for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2284                 p[i] = 0xFFFFF;
2285
2286         dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2287         dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2288         dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2289         dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2290         dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2291         dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2292         dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2293         dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2294         dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2295         dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2296
2297         lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
2298 }
2299
/* ndo_open handler: reset the chip, attach/start the PHY, submit the
 * interrupt URB for link events, seed statistics and start the queue.
 * On success the interface holds one autopm reference (released by
 * lan78xx_stop); the reference taken here is dropped again at 'done'.
 *
 * NOTE(review): if the interrupt URB submit fails, the PHY connected by
 * lan78xx_phy_init() is not disconnected on this error path — verify
 * whether a phy_disconnect() is needed here.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	ret = lan78xx_reset(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto done;

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	dev->link_on = false;

	/* probe link state asynchronously from the kevent worker */
	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2342
/* Unlink all TX/RX URBs and wait for their completions to drain.
 *
 * dev->wait is set so completion paths can wake us; the on-stack wait
 * queue is valid only for the duration of this call.
 *
 * NOTE(review): the wait loop uses '&&', so it only keeps waiting while
 * ALL THREE queues are non-empty — one queue draining ends the wait even
 * if URBs remain on the others. This looks like it was meant to be '||';
 * left byte-identical here pending confirmation against upstream.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
2368
/* ndo_stop handler: stop and disconnect the PHY, quiesce all deferred
 * work and in-flight URBs, and drop the autopm reference taken by
 * lan78xx_open(). Always returns 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net		*dev = netdev_priv(net);

	/* stop the periodic statistics timer before tearing anything down */
	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	phy_stop(net->phydev);
	phy_disconnect(net->phydev);
	net->phydev = NULL;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	/* balance the usb_autopm_get_interface() from lan78xx_open() */
	usb_autopm_put_interface(dev->intf);

	return 0;
}
2406
/* Prepend the two 32-bit TX command words the hardware expects in front
 * of the frame data.
 *
 * tx_cmd_a carries the frame length, FCS insertion, checksum-offload and
 * LSO flags; tx_cmd_b carries the MSS for GSO frames and the VLAN tag.
 * The skb is linearized first (the bulk-out path expects contiguous
 * data). Consumes and frees the skb on failure, returning NULL; on
 * success returns the skb with 8 bytes of command header pushed on —
 * tx_cmd_b is pushed first so tx_cmd_a ends up at skb->data.
 */
static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
				       struct sk_buff *skb, gfp_t flags)
{
	u32 tx_cmd_a, tx_cmd_b;

	/* make sure there is headroom for the 8-byte command header */
	if (skb_cow_head(skb, TX_OVERHEAD)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	tx_cmd_b = 0;
	if (skb_is_gso(skb)) {
		/* clamp MSS to the hardware minimum */
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
	}

	/* command words go out little-endian, tx_cmd_a first on the wire */
	skb_push(skb, 4);
	cpu_to_le32s(&tx_cmd_b);
	memcpy(skb->data, &tx_cmd_b, 4);

	skb_push(skb, 4);
	cpu_to_le32s(&tx_cmd_a);
	memcpy(skb->data, &tx_cmd_a, 4);

	return skb;
}
2451
/* Move a completed skb from its active queue to dev->done and schedule
 * the bh tasklet if the done queue was previously empty.
 *
 * Runs in URB completion (interrupt) context. Note the deliberate lock
 * hand-off: interrupts are disabled once via the first irqsave, the list
 * lock is swapped for the done-queue lock without re-enabling, and the
 * saved flags are restored at the end. Returns the skb's previous state
 * so callers can detect a concurrent unlink.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* only the first enqueue needs to kick the tasklet */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2474
/* Bulk-out URB completion handler (interrupt context).
 *
 * Updates TX statistics, defers error recovery to the kevent worker for
 * endpoint stalls, stops the queue on low-level USB errors, then drops
 * the async autopm reference taken at submit time and hands the skb to
 * the bh tasklet via defer_bh().
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		/* one URB may carry several aggregated packets */
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled; clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		/* link-level problems; stop submitting until recovery */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2513
/* Append 'newsk' to 'list' and record its state in the skb's cb.
 * Uses the unlocked __skb_queue_tail, so the caller must already hold
 * the list's lock.
 */
static void lan78xx_queue_skb(struct sk_buff_head *list,
			      struct sk_buff *newsk, enum skb_state state)
{
	struct skb_data *entry = (struct skb_data *)newsk->cb;

	__skb_queue_tail(list, newsk);
	entry->state = state;
}
2522
2523 static netdev_tx_t
2524 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2525 {
2526         struct lan78xx_net *dev = netdev_priv(net);
2527         struct sk_buff *skb2 = NULL;
2528
2529         if (skb) {
2530                 skb_tx_timestamp(skb);
2531                 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2532         }
2533
2534         if (skb2) {
2535                 skb_queue_tail(&dev->txq_pend, skb2);
2536
2537                 /* throttle TX patch at slower than SUPER SPEED USB */
2538                 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2539                     (skb_queue_len(&dev->txq_pend) > 10))
2540                         netif_stop_queue(net);
2541         } else {
2542                 netif_dbg(dev, tx_err, dev->net,
2543                           "lan78xx_tx_prep return NULL\n");
2544                 dev->net->stats.tx_errors++;
2545                 dev->net->stats.tx_dropped++;
2546         }
2547
2548         tasklet_schedule(&dev->bh);
2549
2550         return NETDEV_TX_OK;
2551 }
2552
2553 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2554 {
2555         struct lan78xx_priv *pdata = NULL;
2556         int ret;
2557         int i;
2558
2559         dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2560
2561         pdata = (struct lan78xx_priv *)(dev->data[0]);
2562         if (!pdata) {
2563                 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2564                 return -ENOMEM;
2565         }
2566
2567         pdata->dev = dev;
2568
2569         spin_lock_init(&pdata->rfe_ctl_lock);
2570         mutex_init(&pdata->dataport_mutex);
2571
2572         INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2573
2574         for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2575                 pdata->vlan_table[i] = 0;
2576
2577         INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2578
2579         dev->net->features = 0;
2580
2581         if (DEFAULT_TX_CSUM_ENABLE)
2582                 dev->net->features |= NETIF_F_HW_CSUM;
2583
2584         if (DEFAULT_RX_CSUM_ENABLE)
2585                 dev->net->features |= NETIF_F_RXCSUM;
2586
2587         if (DEFAULT_TSO_CSUM_ENABLE)
2588                 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2589
2590         dev->net->hw_features = dev->net->features;
2591
2592         /* Init all registers */
2593         ret = lan78xx_reset(dev);
2594
2595         lan78xx_mdio_init(dev);
2596
2597         dev->net->flags |= IFF_MULTICAST;
2598
2599         pdata->wol = WAKE_MAGIC;
2600
2601         return 0;
2602 }
2603
2604 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2605 {
2606         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2607
2608         lan78xx_remove_mdio(dev);
2609
2610         if (pdata) {
2611                 netif_dbg(dev, ifdown, dev->net, "free pdata");
2612                 kfree(pdata);
2613                 pdata = NULL;
2614                 dev->data[0] = 0;
2615         }
2616 }
2617
2618 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2619                                     struct sk_buff *skb,
2620                                     u32 rx_cmd_a, u32 rx_cmd_b)
2621 {
2622         if (!(dev->net->features & NETIF_F_RXCSUM) ||
2623             unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2624                 skb->ip_summed = CHECKSUM_NONE;
2625         } else {
2626                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2627                 skb->ip_summed = CHECKSUM_COMPLETE;
2628         }
2629 }
2630
2631 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2632 {
2633         int             status;
2634
2635         if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2636                 skb_queue_tail(&dev->rxq_pause, skb);
2637                 return;
2638         }
2639
2640         dev->net->stats.rx_packets++;
2641         dev->net->stats.rx_bytes += skb->len;
2642
2643         skb->protocol = eth_type_trans(skb, dev->net);
2644
2645         netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2646                   skb->len + sizeof(struct ethhdr), skb->protocol);
2647         memset(skb->cb, 0, sizeof(struct skb_data));
2648
2649         if (skb_defer_rx_timestamp(skb))
2650                 return;
2651
2652         status = netif_rx(skb);
2653         if (status != NET_RX_SUCCESS)
2654                 netif_dbg(dev, rx_err, dev->net,
2655                           "netif_rx status %d\n", status);
2656 }
2657
/* Parse one completed bulk-in buffer, which may carry several Ethernet
 * frames.  Each frame is preceded by three little-endian command words
 * (RX_CMD_A/B/C) which this function strips.  All frames but the last
 * are cloned and handed up via lan78xx_skb_return(); the last frame is
 * left in @skb for the caller (rx_process) to deliver.
 *
 * Returns 1 on success, 0 on error (runt buffer or clone failure).
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	/* too short to hold even an Ethernet header -> reject */
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* memcpy avoids unaligned loads from the skb data */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		/* frames are padded so the next one starts 4-byte aligned */
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		/* NOTE(review): size comes from the device unchecked;
		 * this assumes the hardware never reports size > skb->len
		 * or size < 4 (skb_trim below computes len - 4) -- TODO
		 * confirm against the datasheet
		 */
		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* receive error flagged by hardware: skip frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			/* more frames follow: clone this one so it can be
			 * delivered independently of the backing buffer
			 */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
2729
2730 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2731 {
2732         if (!lan78xx_rx(dev, skb)) {
2733                 dev->net->stats.rx_errors++;
2734                 goto done;
2735         }
2736
2737         if (skb->len) {
2738                 lan78xx_skb_return(dev, skb);
2739                 return;
2740         }
2741
2742         netif_dbg(dev, rx_err, dev->net, "drop\n");
2743         dev->net->stats.rx_errors++;
2744 done:
2745         skb_queue_tail(&dev->done, skb);
2746 }
2747
2748 static void rx_complete(struct urb *urb);
2749
/* Allocate an RX skb, bind it to @urb and submit the URB on the bulk-in
 * pipe.  On success the skb is queued on dev->rxq in state rx_start.
 * On any failure both the skb and the URB are freed here and a negative
 * errno is returned (-ENOLINK when the device is not ready to receive).
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		/* caller passed ownership of the urb: free it on failure */
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	/* rxq.lock also serializes against halt/unlink state changes */
	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* only submit while the interface is up, the RX pipe is not
	 * halted and the device is not autosuspended
	 */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled pipe: recover from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* transient failure: let the tasklet retry */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
2809
/* Completion handler for a bulk-in (RX) URB (interrupt context).
 *
 * Classifies urb->status, hands the skb to the tasklet via defer_bh()
 * in the appropriate state, and resubmits the URB for the next receive
 * unless the status indicates the pipe or device is going away (then
 * the URB is stashed on the entry for the tasklet to free, or freed
 * here).
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		if (skb->len < dev->net->hard_header_len) {
			/* runt transfer: count it and discard */
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* keep the urb with the entry; the tasklet frees it */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			/* reuse this urb for the next receive */
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
2879
/* Tasklet-side TX path: coalesce pending skbs from txq_pend into one
 * bulk-out URB (a GSO skb is always sent on its own) and submit it.
 * On success the aggregate skb is queued on dev->txq in state tx_start;
 * while autosuspended the URB is anchored on dev->deferred for resume.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	count = 0;
	length = 0;
	spin_lock_irqsave(&tqp->lock, flags);
	/* walk the pending queue to decide how many frames fit into a
	 * single aggregate transfer
	 */
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			count = 1;
			length = skb->len - TX_OVERHEAD;
			__skb_unlink(skb, tqp);
			spin_unlock_irqrestore(&tqp->lock, flags);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* each frame is placed on a 32-bit boundary in the
		 * aggregate buffer
		 */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}
	spin_unlock_irqrestore(&tqp->lock, flags);

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto drop;

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	/* hold the device awake for the transfer; the reference is
	 * dropped in tx_complete()
	 */
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		netif_trans_update(dev->net);
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		/* throttle when the in-flight queue is full */
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
3007
3008 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3009 {
3010         struct urb *urb;
3011         int i;
3012
3013         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3014                 for (i = 0; i < 10; i++) {
3015                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3016                                 break;
3017                         urb = usb_alloc_urb(0, GFP_ATOMIC);
3018                         if (urb)
3019                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3020                                         return;
3021                 }
3022
3023                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3024                         tasklet_schedule(&dev->bh);
3025         }
3026         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3027                 netif_wake_queue(dev->net);
3028 }
3029
/* Bottom-half tasklet: drain dev->done, finishing each skb according to
 * its state (deliver completed RX, free completed/cleaned-up entries),
 * then flush pending TX and refill the RX ring while the interface is
 * up.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			/* mark for cleanup in case rx_process requeues it */
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* unexpected state: stop processing entirely */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		/* skip RX refill while a delay timer is running or halt
		 * recovery is pending
		 */
		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
3073
/* Deferred (process-context) handler for all events raised via
 * lan78xx_defer_kevent(): clears TX/RX endpoint halts, performs link
 * reset, and refreshes the hardware statistics counters.
 *
 * NOTE(review): the fail_pipe/fail_halt/skip_reset labels jump into the
 * middle of if-bodies so the error message is shared with the autopm
 * failure path -- unusual, but apparently deliberate; left untouched.
 */
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt(dev->udev, dev->pipe_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
fail_pipe:
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
				goto fail_halt;
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
fail_halt:
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		int ret = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (lan78xx_link_reset(dev) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			netdev_info(dev->net, "link reset failed (%d)\n",
				    ret);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}

	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
		lan78xx_update_stats(dev);

		clear_bit(EVENT_STAT_UPDATE, &dev->flags);

		/* back off the stats refresh interval exponentially,
		 * capped at 50 ticks of STAT_UPDATE_TIMER
		 */
		mod_timer(&dev->stat_monitor,
			  jiffies + (STAT_UPDATE_TIMER * dev->delta));

		dev->delta = min((dev->delta * 2), 50);
	}
}
3151
3152 static void intr_complete(struct urb *urb)
3153 {
3154         struct lan78xx_net *dev = urb->context;
3155         int status = urb->status;
3156
3157         switch (status) {
3158         /* success */
3159         case 0:
3160                 lan78xx_status(dev, urb);
3161                 break;
3162
3163         /* software-driven interface shutdown */
3164         case -ENOENT:                   /* urb killed */
3165         case -ESHUTDOWN:                /* hardware gone */
3166                 netif_dbg(dev, ifdown, dev->net,
3167                           "intr shutdown, code %d\n", status);
3168                 return;
3169
3170         /* NOTE:  not throttling like RX/TX, since this endpoint
3171          * already polls infrequently
3172          */
3173         default:
3174                 netdev_dbg(dev->net, "intr status %d\n", status);
3175                 break;
3176         }
3177
3178         if (!netif_running(dev->net))
3179                 return;
3180
3181         memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3182         status = usb_submit_urb(urb, GFP_ATOMIC);
3183         if (status != 0)
3184                 netif_err(dev, timer, dev->net,
3185                           "intr resubmit --> %d\n", status);
3186 }
3187
/* USB disconnect callback: tear down in roughly the reverse order of
 * probe -- unregister the netdev first, then cancel the deferred work,
 * scuttle any URBs anchored while suspended, unbind, kill and free the
 * interrupt URB, and finally release the netdev and the usb_device
 * reference taken in probe.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net		*dev;
	struct usb_device		*udev;
	struct net_device		*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);

	net = dev->net;
	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	/* drop the reference taken by usb_get_dev() in probe */
	usb_put_dev(udev);
}
3216
3217 static void lan78xx_tx_timeout(struct net_device *net)
3218 {
3219         struct lan78xx_net *dev = netdev_priv(net);
3220
3221         unlink_urbs(dev, &dev->txq);
3222         tasklet_schedule(&dev->bh);
3223 }
3224
3225 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3226                                                 struct net_device *netdev,
3227                                                 netdev_features_t features)
3228 {
3229         if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3230                 features &= ~NETIF_F_GSO_MASK;
3231
3232         features = vlan_features_check(skb, features);
3233         features = vxlan_features_check(skb, features);
3234
3235         return features;
3236 }
3237
/* net_device callbacks for the LAN78xx USB network interface */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
3253
3254 static void lan78xx_stat_monitor(unsigned long param)
3255 {
3256         struct lan78xx_net *dev;
3257
3258         dev = (struct lan78xx_net *)param;
3259
3260         lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3261 }
3262
3263 static int lan78xx_probe(struct usb_interface *intf,
3264                          const struct usb_device_id *id)
3265 {
3266         struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
3267         struct lan78xx_net *dev;
3268         struct net_device *netdev;
3269         struct usb_device *udev;
3270         int ret;
3271         unsigned maxp;
3272         unsigned period;
3273         u8 *buf = NULL;
3274
3275         udev = interface_to_usbdev(intf);
3276         udev = usb_get_dev(udev);
3277
3278         ret = -ENOMEM;
3279         netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3280         if (!netdev) {
3281                         dev_err(&intf->dev, "Error: OOM\n");
3282                         goto out1;
3283         }
3284
3285         /* netdev_printk() needs this */
3286         SET_NETDEV_DEV(netdev, &intf->dev);
3287
3288         dev = netdev_priv(netdev);
3289         dev->udev = udev;
3290         dev->intf = intf;
3291         dev->net = netdev;
3292         dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3293                                         | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3294
3295         skb_queue_head_init(&dev->rxq);
3296         skb_queue_head_init(&dev->txq);
3297         skb_queue_head_init(&dev->done);
3298         skb_queue_head_init(&dev->rxq_pause);
3299         skb_queue_head_init(&dev->txq_pend);
3300         mutex_init(&dev->phy_mutex);
3301
3302         tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3303         INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3304         init_usb_anchor(&dev->deferred);
3305
3306         netdev->netdev_ops = &lan78xx_netdev_ops;
3307         netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3308         netdev->ethtool_ops = &lan78xx_ethtool_ops;
3309
3310         dev->stat_monitor.function = lan78xx_stat_monitor;
3311         dev->stat_monitor.data = (unsigned long)dev;
3312         dev->delta = 1;
3313         init_timer(&dev->stat_monitor);
3314
3315         mutex_init(&dev->stats.access_lock);
3316
3317         if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
3318                 ret = -ENODEV;
3319                 goto out2;
3320         }
3321
3322         dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3323         ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
3324         if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
3325                 ret = -ENODEV;
3326                 goto out2;
3327         }
3328
3329         dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3330         ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
3331         if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
3332                 ret = -ENODEV;
3333                 goto out2;
3334         }
3335
3336         ep_intr = &intf->cur_altsetting->endpoint[2];
3337         if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
3338                 ret = -ENODEV;
3339                 goto out2;
3340         }
3341
3342         dev->pipe_intr = usb_rcvintpipe(dev->udev,
3343                                         usb_endpoint_num(&ep_intr->desc));
3344
3345         ret = lan78xx_bind(dev, intf);
3346         if (ret < 0)
3347                 goto out2;
3348         strcpy(netdev->name, "eth%d");
3349
3350         if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3351                 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3352         netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
3353
3354         period = ep_intr->desc.bInterval;
3355         maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3356         buf = kmalloc(maxp, GFP_KERNEL);
3357         if (buf) {
3358                 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3359                 if (!dev->urb_intr) {
3360                         ret = -ENOMEM;
3361                         kfree(buf);
3362                         goto out3;
3363                 } else {
3364                         usb_fill_int_urb(dev->urb_intr, dev->udev,
3365                                          dev->pipe_intr, buf, maxp,
3366                                          intr_complete, dev, period);
3367                         dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
3368                 }
3369         }
3370
3371         dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3372
3373         /* driver requires remote-wakeup capability during autosuspend. */
3374         intf->needs_remote_wakeup = 1;
3375
3376         ret = register_netdev(netdev);
3377         if (ret != 0) {
3378                 netif_err(dev, probe, netdev, "couldn't register the device\n");
3379                 goto out2;
3380         }
3381
3382         usb_set_intfdata(intf, dev);
3383
3384         ret = device_set_wakeup_enable(&udev->dev, true);
3385
3386          /* Default delay of 2sec has more overhead than advantage.
3387           * Set to 10sec as default.
3388           */
3389         pm_runtime_set_autosuspend_delay(&udev->dev,
3390                                          DEFAULT_AUTOSUSPEND_DELAY);
3391
3392         return 0;
3393
3394 out3:
3395         lan78xx_unbind(dev, intf);
3396 out2:
3397         free_netdev(netdev);
3398 out1:
3399         usb_put_dev(udev);
3400
3401         return ret;
3402 }
3403
/* Compute the CRC16 the chip expects for a wakeup-frame byte pattern
 * (polynomial 0x8005, seed 0xFFFF, data bits consumed LSB first).
 *
 * NOTE: the "crc |= 1" after the polynomial XOR deviates from a
 * textbook CRC16; it mirrors the device's wakeup-frame CRC engine and
 * must be kept as-is.
 */
static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
{
	u16 crc = 0xFFFF;
	int idx;

	for (idx = 0; idx < len; idx++) {
		u8 byte = buf[idx];
		u16 shift;

		for (shift = 0; shift < 8; shift++) {
			u16 top = crc >> 15;

			crc <<= 1;
			if (top != (u16)(byte & 1)) {
				crc ^= 0x8005;
				crc |= 0x0001;
			}
			byte >>= 1;
		}
	}

	return crc;
}
3428
/* Program the chip's wakeup logic for system suspend according to the
 * WoL bitmask (WAKE_PHY / WAKE_MAGIC / WAKE_BCAST / WAKE_MCAST /
 * WAKE_UCAST / WAKE_ARP), then re-enable the receiver so wake packets
 * can be detected while suspended.
 *
 * NOTE(review): register read/write return codes are collected in
 * 'ret' but never checked; the function always returns 0.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* wakeup-frame filter patterns: IPv4/IPv6 multicast MAC prefixes
	 * and the ARP ethertype bytes (0x0806)
	 */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* quiesce the MAC while reprogramming wake sources */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear previously armed wake sources and latched wake status */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable all wakeup-frame filters before selectively arming */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		/* magic packet is the only source using suspend mode 3 */
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 7 = match the first 3 bytes of the pattern */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 3 = match the first 2 bytes of the pattern */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3000 = match bytes 12,13 (the ethertype) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable the receiver so wake packets can be seen */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3571
3572 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3573 {
3574         struct lan78xx_net *dev = usb_get_intfdata(intf);
3575         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3576         u32 buf;
3577         int ret;
3578         int event;
3579
3580         event = message.event;
3581
3582         if (!dev->suspend_count++) {
3583                 spin_lock_irq(&dev->txq.lock);
3584                 /* don't autosuspend while transmitting */
3585                 if ((skb_queue_len(&dev->txq) ||
3586                      skb_queue_len(&dev->txq_pend)) &&
3587                         PMSG_IS_AUTO(message)) {
3588                         spin_unlock_irq(&dev->txq.lock);
3589                         ret = -EBUSY;
3590                         goto out;
3591                 } else {
3592                         set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3593                         spin_unlock_irq(&dev->txq.lock);
3594                 }
3595
3596                 /* stop TX & RX */
3597                 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3598                 buf &= ~MAC_TX_TXEN_;
3599                 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3600                 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3601                 buf &= ~MAC_RX_RXEN_;
3602                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3603
3604                 /* empty out the rx and queues */
3605                 netif_device_detach(dev->net);
3606                 lan78xx_terminate_urbs(dev);
3607                 usb_kill_urb(dev->urb_intr);
3608
3609                 /* reattach */
3610                 netif_device_attach(dev->net);
3611         }
3612
3613         if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3614                 del_timer(&dev->stat_monitor);
3615
3616                 if (PMSG_IS_AUTO(message)) {
3617                         /* auto suspend (selective suspend) */
3618                         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3619                         buf &= ~MAC_TX_TXEN_;
3620                         ret = lan78xx_write_reg(dev, MAC_TX, buf);
3621                         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3622                         buf &= ~MAC_RX_RXEN_;
3623                         ret = lan78xx_write_reg(dev, MAC_RX, buf);
3624
3625                         ret = lan78xx_write_reg(dev, WUCSR, 0);
3626                         ret = lan78xx_write_reg(dev, WUCSR2, 0);
3627                         ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3628
3629                         /* set goodframe wakeup */
3630                         ret = lan78xx_read_reg(dev, WUCSR, &buf);
3631
3632                         buf |= WUCSR_RFE_WAKE_EN_;
3633                         buf |= WUCSR_STORE_WAKE_;
3634
3635                         ret = lan78xx_write_reg(dev, WUCSR, buf);
3636
3637                         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3638
3639                         buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3640                         buf |= PMT_CTL_RES_CLR_WKP_STS_;
3641
3642                         buf |= PMT_CTL_PHY_WAKE_EN_;
3643                         buf |= PMT_CTL_WOL_EN_;
3644                         buf &= ~PMT_CTL_SUS_MODE_MASK_;
3645                         buf |= PMT_CTL_SUS_MODE_3_;
3646
3647                         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3648
3649                         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3650
3651                         buf |= PMT_CTL_WUPS_MASK_;
3652
3653                         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3654
3655                         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3656                         buf |= MAC_RX_RXEN_;
3657                         ret = lan78xx_write_reg(dev, MAC_RX, buf);
3658                 } else {
3659                         lan78xx_set_suspend(dev, pdata->wol);
3660                 }
3661         }
3662
3663         ret = 0;
3664 out:
3665         return ret;
3666 }
3667
/* USB PM resume callback (also invoked from lan78xx_reset_resume()).
 *
 * Restarts the statistics timer, and on the last (outermost) resume
 * resubmits the interrupt URB, requeues TX URBs deferred while asleep,
 * and restarts the netdev queue.  Finally disarms/acks all wakeup
 * sources and re-enables the transmitter.
 *
 * NOTE(review): URB-submission and register return codes are ignored;
 * the function always returns 0.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!timer_pending(&dev->stat_monitor)) {
		dev->delta = 1;
		mod_timer(&dev->stat_monitor,
			  jiffies + STAT_UPDATE_TIMER);
	}

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
				usb_submit_urb(dev->urb_intr, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		/* resubmit TX URBs parked on the deferred anchor while
		 * the device was asleep
		 */
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				/* drop the packet; the autopm put
				 * presumably balances a get taken when the
				 * URB was deferred — verify against the
				 * defer path
				 */
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			/* only wake the queue if there is room for more TX */
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* disarm all wake sources and clear latched wake-source status */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	/* write the *_RCD_/status bits back (apparently write-1-to-clear
	 * semantics — confirm against the LAN78xx datasheet)
	 */
	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
3734
/* PM reset_resume handler: the device may have been re-enumerated and
 * lost its register state, so run a full chip reset and PHY bring-up
 * before taking the ordinary resume path.
 */
static int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);
	lan78xx_phy_init(dev);

	return lan78xx_resume(intf);
}
3745
/* USB IDs this driver binds to; exported for module autoloading. */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{},	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, products);
3758
/* USB driver descriptor: probe/disconnect plus full power-management
 * support (runtime autosuspend, system suspend/resume, reset_resume).
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};
3770
/* Standard module boilerplate: register the USB driver on load and
 * deregister it on unload.
 */
module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");