GNU Linux-libre 4.4.287-gnu1
[releases.git] / drivers / net / usb / lan78xx.c
1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <net/ip6_checksum.h>
33 #include <net/vxlan.h>
34 #include <linux/microchipphy.h>
35 #include "lan78xx.h"
36
37 #define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
38 #define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
39 #define DRIVER_NAME     "lan78xx"
40 #define DRIVER_VERSION  "1.0.1"
41
42 #define TX_TIMEOUT_JIFFIES              (5 * HZ)
43 #define THROTTLE_JIFFIES                (HZ / 8)
44 #define UNLINK_TIMEOUT_MS               3
45
46 #define RX_MAX_QUEUE_MEMORY             (60 * 1518)
47
48 #define SS_USB_PKT_SIZE                 (1024)
49 #define HS_USB_PKT_SIZE                 (512)
50 #define FS_USB_PKT_SIZE                 (64)
51
52 #define MAX_RX_FIFO_SIZE                (12 * 1024)
53 #define MAX_TX_FIFO_SIZE                (12 * 1024)
54 #define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
55 #define DEFAULT_BULK_IN_DELAY           (0x0800)
56 #define MAX_SINGLE_PACKET_SIZE          (9000)
57 #define DEFAULT_TX_CSUM_ENABLE          (true)
58 #define DEFAULT_RX_CSUM_ENABLE          (true)
59 #define DEFAULT_TSO_CSUM_ENABLE         (true)
60 #define DEFAULT_VLAN_FILTER_ENABLE      (true)
61 #define TX_OVERHEAD                     (8)
62 #define RXW_PADDING                     2
63
64 #define LAN78XX_USB_VENDOR_ID           (0x0424)
65 #define LAN7800_USB_PRODUCT_ID          (0x7800)
66 #define LAN7850_USB_PRODUCT_ID          (0x7850)
67 #define LAN78XX_EEPROM_MAGIC            (0x78A5)
68 #define LAN78XX_OTP_MAGIC               (0x78F3)
69
70 #define MII_READ                        1
71 #define MII_WRITE                       0
72
73 #define EEPROM_INDICATOR                (0xA5)
74 #define EEPROM_MAC_OFFSET               (0x01)
75 #define MAX_EEPROM_SIZE                 512
76 #define OTP_INDICATOR_1                 (0xF3)
77 #define OTP_INDICATOR_2                 (0xF7)
78
79 #define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
80                                          WAKE_MCAST | WAKE_BCAST | \
81                                          WAKE_ARP | WAKE_MAGIC)
82
83 /* USB related defines */
84 #define BULK_IN_PIPE                    1
85 #define BULK_OUT_PIPE                   2
86
87 /* default autosuspend delay (mSec)*/
88 #define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
89
/* ethtool statistics names; order must match the u32 counters of
 * struct lan78xx_statstage (filled by lan78xx_read_stats()), one
 * string per counter.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
139
/* Hardware statistics block as returned by the GET_STATS vendor
 * request (see lan78xx_read_stats()).  The device delivers each field
 * as a little-endian u32; layout must match lan78xx_gstrings[].
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
189
struct lan78xx_net;

/* Driver-private receive-filter state, stored in dev->data[0].
 * Cached register/table copies are written to the chip from the
 * deferred work items below, since USB register access may sleep.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;		/* cached RFE_CTL register value */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast; /* deferred multicast table write */
	struct work_struct set_vlan;	  /* deferred VLAN table write */
	u32 wol;		/* Wake-on-LAN option flags */
};
204
/* Lifecycle state of an skb/urb pair as it moves through the driver's
 * tx/rx/done queues (stored in struct skb_data in skb->cb).
 */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
214
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB associated with this skb */
	struct lan78xx_net *dev;
	enum skb_state state;	/* where the skb is in its lifecycle */
	size_t length;		/* payload length for accounting */
};
221
/* Context carried by asynchronous control transfers. */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
226
227 #define EVENT_TX_HALT                   0
228 #define EVENT_RX_HALT                   1
229 #define EVENT_RX_MEMORY                 2
230 #define EVENT_STS_SPLIT                 3
231 #define EVENT_LINK_RESET                4
232 #define EVENT_RX_PAUSED                 5
233 #define EVENT_DEV_WAKING                6
234 #define EVENT_DEV_ASLEEP                7
235 #define EVENT_DEV_OPEN                  8
236
/* Per-device driver state (netdev_priv of the net_device). */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;	/* points to struct lan78xx_priv */

	int			rx_qlen;	/* rx URB queue depth */
	int			tx_qlen;	/* tx URB queue depth */
	struct sk_buff_head	rxq;
	struct sk_buff_head	txq;
	struct sk_buff_head	done;		/* completed urbs awaiting bh */
	struct sk_buff_head	rxq_pause;
	struct sk_buff_head	txq_pend;

	struct tasklet_struct	bh;		/* rx/tx bottom half */
	struct delayed_work	wq;		/* kevent work (see EVENT_* bits) */

	int			msg_enable;

	struct urb		*urb_intr;	/* interrupt endpoint urb */
	struct usb_anchor	deferred;	/* urbs deferred across suspend */

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;		/* EVENT_* bits */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;

	unsigned long		data[5];	/* data[0] = struct lan78xx_priv * */

	int			link_on;	/* last observed link state */
	u8			mdix_ctrl;

	u32			devid;		/* chip ID/revision */
	struct mii_bus		*mdiobus;
};
281
282 /* use ethtool to change the level for any given device */
283 static int msg_level = -1;
284 module_param(msg_level, int, 0);
285 MODULE_PARM_DESC(msg_level, "Override default message level");
286
/* Read a 32-bit device register via a vendor control-IN transfer.
 *
 * The 4-byte transfer buffer is heap-allocated because USB transfer
 * buffers must be DMA-able; an on-stack buffer is not safe here.
 * On success the little-endian value is converted to CPU order and
 * stored in *data.  Returns the usb_control_msg() result (>= 0) on
 * success or a negative errno.  May sleep.
 */
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}
312
313 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
314 {
315         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
316         int ret;
317
318         if (!buf)
319                 return -ENOMEM;
320
321         *buf = data;
322         cpu_to_le32s(buf);
323
324         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
325                               USB_VENDOR_REQUEST_WRITE_REGISTER,
326                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
327                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
328         if (unlikely(ret < 0)) {
329                 netdev_warn(dev->net,
330                             "Failed to write register index 0x%08x. ret = %d",
331                             index, ret);
332         }
333
334         kfree(buf);
335
336         return ret;
337 }
338
339 static int lan78xx_read_stats(struct lan78xx_net *dev,
340                               struct lan78xx_statstage *data)
341 {
342         int ret = 0;
343         int i;
344         struct lan78xx_statstage *stats;
345         u32 *src;
346         u32 *dst;
347
348         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
349         if (!stats)
350                 return -ENOMEM;
351
352         ret = usb_control_msg(dev->udev,
353                               usb_rcvctrlpipe(dev->udev, 0),
354                               USB_VENDOR_REQUEST_GET_STATS,
355                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
356                               0,
357                               0,
358                               (void *)stats,
359                               sizeof(*stats),
360                               USB_CTRL_SET_TIMEOUT);
361         if (likely(ret >= 0)) {
362                 src = (u32 *)stats;
363                 dst = (u32 *)data;
364                 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
365                         le32_to_cpus(&src[i]);
366                         dst[i] = src[i];
367                 }
368         } else {
369                 netdev_warn(dev->net,
370                             "Failed to read stat ret = %d", ret);
371         }
372
373         kfree(stats);
374
375         return ret;
376 }
377
378 /* Loop until the read is completed with timeout called with phy_mutex held */
379 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
380 {
381         unsigned long start_time = jiffies;
382         u32 val;
383         int ret;
384
385         do {
386                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
387                 if (unlikely(ret < 0))
388                         return -EIO;
389
390                 if (!(val & MII_ACC_MII_BUSY_))
391                         return 0;
392         } while (!time_after(jiffies, start_time + HZ));
393
394         return -EIO;
395 }
396
397 static inline u32 mii_access(int id, int index, int read)
398 {
399         u32 ret;
400
401         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
402         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
403         if (read)
404                 ret |= MII_ACC_MII_READ_;
405         else
406                 ret |= MII_ACC_MII_WRITE_;
407         ret |= MII_ACC_MII_BUSY_;
408
409         return ret;
410 }
411
/* Wait (up to one second) for a previously issued EEPROM command to
 * finish.  Polls E2P_CMD and exits early if the controller reports a
 * timeout of its own (E2P_CMD_EPC_TIMEOUT_).  Returns 0 on completion,
 * -EIO on register-read failure, controller timeout, or still busy
 * after the deadline.
 */
static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		/* done when busy clears; bail out early on HW timeout */
		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	/* val holds the last E2P_CMD sample from the loop above */
	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}
436
437 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
438 {
439         unsigned long start_time = jiffies;
440         u32 val;
441         int ret;
442
443         do {
444                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
445                 if (unlikely(ret < 0))
446                         return -EIO;
447
448                 if (!(val & E2P_CMD_EPC_BUSY_))
449                         return 0;
450
451                 usleep_range(40, 100);
452         } while (!time_after(jiffies, start_time + HZ));
453
454         netdev_warn(dev->net, "EEPROM is busy");
455         return -EIO;
456 }
457
/* Read @length bytes from the EEPROM starting at @offset into @data,
 * one byte per READ command (the controller exposes a single-byte
 * data register).  Returns 0 on success or a negative errno.
 */
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	int i, ret;

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret)
		return ret;

	for (i = 0; i < length; i++) {
		/* issue a one-byte READ at the current offset */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0))
			return -EIO;

		ret = lan78xx_wait_eeprom(dev);
		if (ret < 0)
			return ret;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0))
			return -EIO;

		/* only the low byte of E2P_DATA is meaningful */
		data[i] = val & 0xFF;
		offset++;
	}

	return 0;
}
489
490 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
491                                u32 length, u8 *data)
492 {
493         u8 sig;
494         int ret;
495
496         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
497         if ((ret == 0) && (sig == EEPROM_INDICATOR))
498                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
499         else
500                 ret = -EINVAL;
501
502         return ret;
503 }
504
/* Write @length bytes from @data to the EEPROM starting at @offset.
 * Issues a single write-enable (EWEN) command, then one WRITE command
 * per byte, waiting for completion after each.  Returns 0 on success
 * or a negative errno.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	int i, ret;

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret)
		return ret;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0))
		return -EIO;

	ret = lan78xx_wait_eeprom(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0)
			return ret;

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		if (ret < 0)
			return ret;

		offset++;
	}

	return 0;
}
548
549 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
550                                 u32 length, u8 *data)
551 {
552         int i;
553         int ret;
554         u32 buf;
555         unsigned long timeout;
556
557         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
558
559         if (buf & OTP_PWR_DN_PWRDN_N_) {
560                 /* clear it and wait to be cleared */
561                 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
562
563                 timeout = jiffies + HZ;
564                 do {
565                         usleep_range(1, 10);
566                         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
567                         if (time_after(jiffies, timeout)) {
568                                 netdev_warn(dev->net,
569                                             "timeout on OTP_PWR_DN");
570                                 return -EIO;
571                         }
572                 } while (buf & OTP_PWR_DN_PWRDN_N_);
573         }
574
575         for (i = 0; i < length; i++) {
576                 ret = lan78xx_write_reg(dev, OTP_ADDR1,
577                                         ((offset + i) >> 8) & OTP_ADDR1_15_11);
578                 ret = lan78xx_write_reg(dev, OTP_ADDR2,
579                                         ((offset + i) & OTP_ADDR2_10_3));
580
581                 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
582                 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
583
584                 timeout = jiffies + HZ;
585                 do {
586                         udelay(1);
587                         ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
588                         if (time_after(jiffies, timeout)) {
589                                 netdev_warn(dev->net,
590                                             "timeout on OTP_STATUS");
591                                 return -EIO;
592                         }
593                 } while (buf & OTP_STATUS_BUSY_);
594
595                 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
596
597                 data[i] = (u8)(buf & 0xFF);
598         }
599
600         return 0;
601 }
602
603 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
604                             u32 length, u8 *data)
605 {
606         u8 sig;
607         int ret;
608
609         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
610
611         if (ret == 0) {
612                 if (sig == OTP_INDICATOR_1)
613                         offset = offset;
614                 else if (sig == OTP_INDICATOR_2)
615                         offset += 0x100;
616                 else
617                         ret = -EINVAL;
618                 if (!ret)
619                         ret = lan78xx_read_raw_otp(dev, offset, length, data);
620         }
621
622         return ret;
623 }
624
625 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
626 {
627         int i, ret;
628
629         for (i = 0; i < 100; i++) {
630                 u32 dp_sel;
631
632                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
633                 if (unlikely(ret < 0))
634                         return -EIO;
635
636                 if (dp_sel & DP_SEL_DPRDY_)
637                         return 0;
638
639                 usleep_range(40, 100);
640         }
641
642         netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
643
644         return -EIO;
645 }
646
647 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
648                                   u32 addr, u32 length, u32 *buf)
649 {
650         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
651         u32 dp_sel;
652         int i, ret;
653
654         if (usb_autopm_get_interface(dev->intf) < 0)
655                         return 0;
656
657         mutex_lock(&pdata->dataport_mutex);
658
659         ret = lan78xx_dataport_wait_not_busy(dev);
660         if (ret < 0)
661                 goto done;
662
663         ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
664
665         dp_sel &= ~DP_SEL_RSEL_MASK_;
666         dp_sel |= ram_select;
667         ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
668
669         for (i = 0; i < length; i++) {
670                 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
671
672                 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
673
674                 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
675
676                 ret = lan78xx_dataport_wait_not_busy(dev);
677                 if (ret < 0)
678                         goto done;
679         }
680
681 done:
682         mutex_unlock(&pdata->dataport_mutex);
683         usb_autopm_put_interface(dev->intf);
684
685         return ret;
686 }
687
/* Cache a perfect-filter (MAF) entry for MAC address @addr at @index
 * in pdata->pfilter_table.  Index 0 is reserved for the device's own
 * address, hence the (index > 0) guard.  The table is flushed to the
 * chip later by lan78xx_deferred_multicast_write().
 */
static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32	temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		/* MAF_LO holds bytes 0..3 of the address, byte 0 in the
		 * most significant position
		 */
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		/* MAF_HI holds bytes 4..5 plus valid/type control bits */
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}
705
706 /* returns hash bit number for given MAC address */
/* returns hash bit number for given MAC address: the top 9 bits of
 * the Ethernet CRC, indexing into the 512-bit multicast hash table
 */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}
711
/* Work handler (pdata->set_multicast): push the cached multicast hash
 * table, perfect-filter table, and RFE_CTL value to the chip.  Runs in
 * process context because the USB register writes may sleep; the cache
 * itself is filled under a spinlock in lan78xx_set_multicast().
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	/* hash table lives in the VLAN/DA dataport RAM after the VLAN area */
	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		/* clear the valid bit first so the entry is never
		 * half-updated while active
		 */
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
736
/* ndo_set_rx_mode callback: rebuild the cached receive-filter state
 * (RFE_CTL flags, perfect-filter table, multicast hash table) from the
 * netdev's flags and multicast list.  May be called in atomic context,
 * so only the caches are updated here (under rfe_ctl_lock); the actual
 * USB register writes are deferred to lan78xx_deferred_multicast_write().
 */
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	/* start from a clean filter configuration */
	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
			pdata->mchash_table[i] = 0;
	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
			pdata->pfilter_table[i][0] =
			pdata->pfilter_table[i][1] = 0;
	}

	/* broadcast is always accepted */
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				/* overflow addresses go into the hash table */
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
799
/* Program MAC flow control (FLOW) and FIFO flow thresholds (FCT_FLOW)
 * from the resolved pause capabilities of both link partners.
 * The fct_flow threshold values are speed-dependent magic from the
 * vendor; they stay 0 for full-speed links.
 *
 * NOTE(review): the @duplex parameter is currently unused, and the
 * lan78xx_write_reg() results are ignored — the function always
 * returns 0.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;

	u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);

	if (cap & FLOW_CTRL_TX)
		flow = (FLOW_CR_TX_FCEN_ | 0xFFFF); /* max pause time */

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
830
/* Handle a PHY interrupt (EVENT_LINK_RESET work): acknowledge the
 * interrupt on both the PHY and the MAC, re-read the PHY state, and
 * on a link transition either reset the MAC (link lost) or configure
 * USB LPM states and flow control for the new speed (link gained).
 * Returns 0 or the last intermediate result; negative errno on
 * register/PHY access failure.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	int ladv, radv, ret;
	u32 buf;

	/* clear PHY interrupt status */
	ret = phy_read(phydev, LAN88XX_INT_STS);
	if (unlikely(ret < 0))
		return -EIO;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		/* link went down */
		dev->link_on = false;
		netif_carrier_off(dev->net);

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;
	} else if (phydev->link && !dev->link_on) {
		/* link came up */
		dev->link_on = true;

		phy_ethtool_gset(phydev, &ecmd);

		ret = phy_read(phydev, LAN88XX_INT_STS);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ethtool_cmd_speed(&ecmd) == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		/* resolve pause capabilities from both link partners */
		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
		netif_carrier_on(dev->net);

		/* kick the bottom half to restart rx/tx processing */
		tasklet_schedule(&dev->bh);
	}

	return ret;
}
908
909 /* some work can't be done in tasklets, so we use keventd
910  *
911  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
912  * but tasklet_schedule() doesn't.      hope the failure is rare.
913  */
914 void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
915 {
916         set_bit(work, &dev->flags);
917         if (!schedule_delayed_work(&dev->wq, 0))
918                 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
919 }
920
921 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
922 {
923         u32 intdata;
924
925         if (urb->actual_length != 4) {
926                 netdev_warn(dev->net,
927                             "unexpected urb length %d", urb->actual_length);
928                 return;
929         }
930
931         memcpy(&intdata, urb->transfer_buffer, 4);
932         le32_to_cpus(&intdata);
933
934         if (intdata & INT_ENP_PHY_INT) {
935                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
936                           lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
937         } else
938                 netdev_warn(dev->net,
939                             "unexpected interrupt: 0x%08x\n", intdata);
940 }
941
/* ethtool get_eeprom_len: the device exposes a fixed-size EEPROM map */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
946
/* ethtool get_eeprom: stamp the driver magic and pass the raw read to
 * the EEPROM helper.
 * NOTE(review): no usb_autopm reference is taken here, unlike
 * lan78xx_get_stats — confirm EEPROM access is safe while autosuspended.
 */
static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	ee->magic = LAN78XX_EEPROM_MAGIC;

	return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
}
956
957 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
958                                       struct ethtool_eeprom *ee, u8 *data)
959 {
960         struct lan78xx_net *dev = netdev_priv(netdev);
961
962         /* Allow entire eeprom update only */
963         if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
964             (ee->offset == 0) &&
965             (ee->len == 512) &&
966             (data[0] == EEPROM_INDICATOR))
967                 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
968         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
969                  (ee->offset == 0) &&
970                  (ee->len == 512) &&
971                  (data[0] == OTP_INDICATOR_1))
972                 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
973
974         return -EINVAL;
975 }
976
/* ethtool get_strings: copy the statistics name table for ETH_SS_STATS */
static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}
983
984 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
985 {
986         if (sset == ETH_SS_STATS)
987                 return ARRAY_SIZE(lan78xx_gstrings);
988         else
989                 return -EOPNOTSUPP;
990 }
991
992 static void lan78xx_get_stats(struct net_device *netdev,
993                               struct ethtool_stats *stats, u64 *data)
994 {
995         struct lan78xx_net *dev = netdev_priv(netdev);
996         struct lan78xx_statstage lan78xx_stat;
997         u32 *p;
998         int i;
999
1000         if (usb_autopm_get_interface(dev->intf) < 0)
1001                 return;
1002
1003         if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
1004                 p = (u32 *)&lan78xx_stat;
1005                 for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
1006                         data[i] = p[i];
1007         }
1008
1009         usb_autopm_put_interface(dev->intf);
1010 }
1011
1012 static void lan78xx_get_wol(struct net_device *netdev,
1013                             struct ethtool_wolinfo *wol)
1014 {
1015         struct lan78xx_net *dev = netdev_priv(netdev);
1016         int ret;
1017         u32 buf;
1018         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1019
1020         if (usb_autopm_get_interface(dev->intf) < 0)
1021                         return;
1022
1023         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1024         if (unlikely(ret < 0)) {
1025                 wol->supported = 0;
1026                 wol->wolopts = 0;
1027         } else {
1028                 if (buf & USB_CFG_RMT_WKP_) {
1029                         wol->supported = WAKE_ALL;
1030                         wol->wolopts = pdata->wol;
1031                 } else {
1032                         wol->supported = 0;
1033                         wol->wolopts = 0;
1034                 }
1035         }
1036
1037         usb_autopm_put_interface(dev->intf);
1038 }
1039
1040 static int lan78xx_set_wol(struct net_device *netdev,
1041                            struct ethtool_wolinfo *wol)
1042 {
1043         struct lan78xx_net *dev = netdev_priv(netdev);
1044         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1045         int ret;
1046
1047         ret = usb_autopm_get_interface(dev->intf);
1048         if (ret < 0)
1049                 return ret;
1050
1051         if (wol->wolopts & ~WAKE_ALL)
1052                 return -EINVAL;
1053
1054         pdata->wol = wol->wolopts;
1055
1056         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1057
1058         phy_ethtool_set_wol(netdev->phydev, wol);
1059
1060         usb_autopm_put_interface(dev->intf);
1061
1062         return ret;
1063 }
1064
1065 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1066 {
1067         struct lan78xx_net *dev = netdev_priv(net);
1068         struct phy_device *phydev = net->phydev;
1069         int ret;
1070         u32 buf;
1071
1072         ret = usb_autopm_get_interface(dev->intf);
1073         if (ret < 0)
1074                 return ret;
1075
1076         ret = phy_ethtool_get_eee(phydev, edata);
1077         if (ret < 0)
1078                 goto exit;
1079
1080         ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1081         if (buf & MAC_CR_EEE_EN_) {
1082                 edata->eee_enabled = true;
1083                 edata->eee_active = !!(edata->advertised &
1084                                        edata->lp_advertised);
1085                 edata->tx_lpi_enabled = true;
1086                 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1087                 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1088                 edata->tx_lpi_timer = buf;
1089         } else {
1090                 edata->eee_enabled = false;
1091                 edata->eee_active = false;
1092                 edata->tx_lpi_enabled = false;
1093                 edata->tx_lpi_timer = 0;
1094         }
1095
1096         ret = 0;
1097 exit:
1098         usb_autopm_put_interface(dev->intf);
1099
1100         return ret;
1101 }
1102
1103 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1104 {
1105         struct lan78xx_net *dev = netdev_priv(net);
1106         int ret;
1107         u32 buf;
1108
1109         ret = usb_autopm_get_interface(dev->intf);
1110         if (ret < 0)
1111                 return ret;
1112
1113         if (edata->eee_enabled) {
1114                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1115                 buf |= MAC_CR_EEE_EN_;
1116                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1117
1118                 phy_ethtool_set_eee(net->phydev, edata);
1119
1120                 buf = (u32)edata->tx_lpi_timer;
1121                 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1122         } else {
1123                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1124                 buf &= ~MAC_CR_EEE_EN_;
1125                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1126         }
1127
1128         usb_autopm_put_interface(dev->intf);
1129
1130         return 0;
1131 }
1132
/* ethtool get_link: refresh the PHY state first so the answer is current */
static u32 lan78xx_get_link(struct net_device *net)
{
	phy_read_status(net->phydev);

	return net->phydev->link;
}
1139
/* ethtool nway_reset: restart autonegotiation on the attached PHY */
int lan78xx_nway_reset(struct net_device *net)
{
	return phy_start_aneg(net->phydev);
}
1144
1145 static void lan78xx_get_drvinfo(struct net_device *net,
1146                                 struct ethtool_drvinfo *info)
1147 {
1148         struct lan78xx_net *dev = netdev_priv(net);
1149
1150         strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1151         strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1152         usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1153 }
1154
/* ethtool get_msglevel: report the driver's netif message-enable mask */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1161
/* ethtool set_msglevel: update the driver's netif message-enable mask */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1168
1169 static int lan78xx_get_mdix_status(struct net_device *net)
1170 {
1171         struct phy_device *phydev = net->phydev;
1172         int buf;
1173
1174         phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
1175         buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1176         phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
1177
1178         return buf;
1179 }
1180
1181 static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1182 {
1183         struct lan78xx_net *dev = netdev_priv(net);
1184         struct phy_device *phydev = net->phydev;
1185         int buf;
1186
1187         if (mdix_ctrl == ETH_TP_MDI) {
1188                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1189                           LAN88XX_EXT_PAGE_SPACE_1);
1190                 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1191                 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1192                 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1193                           buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1194                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1195                           LAN88XX_EXT_PAGE_SPACE_0);
1196         } else if (mdix_ctrl == ETH_TP_MDI_X) {
1197                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1198                           LAN88XX_EXT_PAGE_SPACE_1);
1199                 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1200                 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1201                 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1202                           buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1203                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1204                           LAN88XX_EXT_PAGE_SPACE_0);
1205         } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1206                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1207                           LAN88XX_EXT_PAGE_SPACE_1);
1208                 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1209                 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1210                 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1211                           buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1212                 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1213                           LAN88XX_EXT_PAGE_SPACE_0);
1214         }
1215         dev->mdix_ctrl = mdix_ctrl;
1216 }
1217
1218 static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
1219 {
1220         struct lan78xx_net *dev = netdev_priv(net);
1221         struct phy_device *phydev = net->phydev;
1222         int ret;
1223         int buf;
1224
1225         ret = usb_autopm_get_interface(dev->intf);
1226         if (ret < 0)
1227                 return ret;
1228
1229         ret = phy_ethtool_gset(phydev, cmd);
1230
1231         buf = lan78xx_get_mdix_status(net);
1232
1233         buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1234         if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
1235                 cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
1236                 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
1237         } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
1238                 cmd->eth_tp_mdix = ETH_TP_MDI;
1239                 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
1240         } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
1241                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
1242                 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
1243         }
1244
1245         usb_autopm_put_interface(dev->intf);
1246
1247         return ret;
1248 }
1249
/* ethtool set_settings: update MDI-X control if it changed, apply the
 * requested speed/duplex via phylib, and — for forced (non-autoneg)
 * modes — briefly pulse BMCR loopback to drop the link so the partner
 * re-trains at the new settings.
 */
static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
		lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
	}

	/* change speed & duplex */
	ret = phy_ethtool_sset(phydev, cmd);

	if (!cmd->autoneg) {
		/* force link down by toggling loopback for ~1ms, then
		 * restore the original BMCR value
		 */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1280
/* ethtool entry points exported via net_device->ethtool_ops */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link       = lan78xx_get_link,
	.nway_reset     = lan78xx_nway_reset,
	.get_drvinfo    = lan78xx_get_drvinfo,
	.get_msglevel   = lan78xx_get_msglevel,
	.set_msglevel   = lan78xx_set_msglevel,
	.get_settings   = lan78xx_get_settings,
	.set_settings   = lan78xx_set_settings,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom     = lan78xx_ethtool_get_eeprom,
	.set_eeprom     = lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings    = lan78xx_get_strings,
	.get_wol        = lan78xx_get_wol,
	.set_wol        = lan78xx_set_wol,
	.get_eee        = lan78xx_get_eee,
	.set_eee        = lan78xx_set_eee,
};
1300
/* ndo_do_ioctl: forward MII ioctls to phylib; rejected while the
 * interface is down.
 */
static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;

	return phy_mii_ioctl(netdev->phydev, rq, cmd);
}
1308
1309 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1310 {
1311         u32 addr_lo, addr_hi;
1312         int ret;
1313         u8 addr[6];
1314
1315         ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1316         ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1317
1318         addr[0] = addr_lo & 0xFF;
1319         addr[1] = (addr_lo >> 8) & 0xFF;
1320         addr[2] = (addr_lo >> 16) & 0xFF;
1321         addr[3] = (addr_lo >> 24) & 0xFF;
1322         addr[4] = addr_hi & 0xFF;
1323         addr[5] = (addr_hi >> 8) & 0xFF;
1324
1325         if (!is_valid_ether_addr(addr)) {
1326                 /* reading mac address from EEPROM or OTP */
1327                 if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1328                                          addr) == 0) ||
1329                     (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1330                                       addr) == 0)) {
1331                         if (is_valid_ether_addr(addr)) {
1332                                 /* eeprom values are valid so use them */
1333                                 netif_dbg(dev, ifup, dev->net,
1334                                           "MAC address read from EEPROM");
1335                         } else {
1336                                 /* generate random MAC */
1337                                 random_ether_addr(addr);
1338                                 netif_dbg(dev, ifup, dev->net,
1339                                           "MAC address set to random addr");
1340                         }
1341
1342                         addr_lo = addr[0] | (addr[1] << 8) |
1343                                   (addr[2] << 16) | (addr[3] << 24);
1344                         addr_hi = addr[4] | (addr[5] << 8);
1345
1346                         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1347                         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1348                 } else {
1349                         /* generate random MAC */
1350                         random_ether_addr(addr);
1351                         netif_dbg(dev, ifup, dev->net,
1352                                   "MAC address set to random addr");
1353                 }
1354         }
1355
1356         ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1357         ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1358
1359         ether_addr_copy(dev->net->dev_addr, addr);
1360 }
1361
1362 /* MDIO read and write wrappers for phylib */
/* MDIO read wrapper for phylib.
 *
 * Reads PHY register @idx of @phy_id through the chip's MII_ACC /
 * MII_DATA registers.  Serialized by dev->phy_mutex and wrapped in a
 * USB autopm reference so the device cannot autosuspend mid-transfer.
 * Returns the 16-bit register value, or a negative errno if the MII
 * interface stayed busy or the autopm get failed.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* NOTE(review): the status of this register read is overwritten
	 * below, so a failed USB read would return stale data — confirm
	 * callers tolerate this.
	 */
	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
1397
/* MDIO write wrapper for phylib.
 *
 * Writes @regval to PHY register @idx of @phy_id via MII_DATA/MII_ACC,
 * serialized by dev->phy_mutex under a USB autopm reference.
 *
 * NOTE(review): errors from the busy-waits are discarded (the final
 * "goto done" jumps to the very next statement) and the function always
 * returns 0 — confirm callers tolerate silent write failures.
 */
static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return 0;
}
1432
1433 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1434 {
1435         int ret;
1436         int i;
1437
1438         dev->mdiobus = mdiobus_alloc();
1439         if (!dev->mdiobus) {
1440                 netdev_err(dev->net, "can't allocate MDIO bus\n");
1441                 return -ENOMEM;
1442         }
1443
1444         dev->mdiobus->priv = (void *)dev;
1445         dev->mdiobus->read = lan78xx_mdiobus_read;
1446         dev->mdiobus->write = lan78xx_mdiobus_write;
1447         dev->mdiobus->name = "lan78xx-mdiobus";
1448         dev->mdiobus->parent = &dev->udev->dev;
1449
1450         snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1451                  dev->udev->bus->busnum, dev->udev->devnum);
1452
1453         dev->mdiobus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1454         if (!dev->mdiobus->irq) {
1455                 ret = -ENOMEM;
1456                 goto exit1;
1457         }
1458
1459         /* handle our own interrupt */
1460         for (i = 0; i < PHY_MAX_ADDR; i++)
1461                 dev->mdiobus->irq[i] = PHY_IGNORE_INTERRUPT;
1462
1463         switch (dev->devid & ID_REV_CHIP_ID_MASK_) {
1464         case 0x78000000:
1465         case 0x78500000:
1466                 /* set to internal PHY id */
1467                 dev->mdiobus->phy_mask = ~(1 << 1);
1468                 break;
1469         }
1470
1471         ret = mdiobus_register(dev->mdiobus);
1472         if (ret) {
1473                 netdev_err(dev->net, "can't register MDIO bus\n");
1474                 goto exit2;
1475         }
1476
1477         netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1478         return 0;
1479 exit2:
1480         kfree(dev->mdiobus->irq);
1481 exit1:
1482         mdiobus_free(dev->mdiobus);
1483         return ret;
1484 }
1485
/* Undo lan78xx_mdio_init(): unregister the bus, free the irq table and
 * the bus structure itself.
 */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	kfree(dev->mdiobus->irq);
	mdiobus_free(dev->mdiobus);
}
1492
/* phylib link-change callback.  Link events are handled by the driver
 * itself via the interrupt endpoint (see lan78xx_status /
 * EVENT_LINK_RESET), so phylib's notification needs no action.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	/* nothing to do */
}
1497
1498 static int lan78xx_phy_init(struct lan78xx_net *dev)
1499 {
1500         int ret;
1501         struct phy_device *phydev = dev->net->phydev;
1502
1503         phydev = phy_find_first(dev->mdiobus);
1504         if (!phydev) {
1505                 netdev_err(dev->net, "no PHY found\n");
1506                 return -EIO;
1507         }
1508
1509         ret = phy_connect_direct(dev->net, phydev,
1510                                  lan78xx_link_status_change,
1511                                  PHY_INTERFACE_MODE_GMII);
1512         if (ret) {
1513                 netdev_err(dev->net, "can't attach PHY to %s\n",
1514                            dev->mdiobus->id);
1515                 return -EIO;
1516         }
1517
1518         /* set to AUTOMDIX */
1519         lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
1520
1521         /* MAC doesn't support 1000T Half */
1522         phydev->supported &= ~SUPPORTED_1000baseT_Half;
1523         phydev->supported |= (SUPPORTED_10baseT_Half |
1524                               SUPPORTED_10baseT_Full |
1525                               SUPPORTED_100baseT_Half |
1526                               SUPPORTED_100baseT_Full |
1527                               SUPPORTED_1000baseT_Full |
1528                               SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1529         genphy_config_aneg(phydev);
1530
1531         /* Workaround to enable PHY interrupt.
1532          * phy_start_interrupts() is API for requesting and enabling
1533          * PHY interrupt. However, USB-to-Ethernet device can't use
1534          * request_irq() called in phy_start_interrupts().
1535          * Set PHY to PHY_HALTED and call phy_start()
1536          * to make a call to phy_enable_interrupts()
1537          */
1538         phy_stop(phydev);
1539         phy_start(phydev);
1540
1541         netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
1542
1543         return 0;
1544 }
1545
1546 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
1547 {
1548         int ret = 0;
1549         u32 buf;
1550         bool rxenabled;
1551
1552         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1553
1554         rxenabled = ((buf & MAC_RX_RXEN_) != 0);
1555
1556         if (rxenabled) {
1557                 buf &= ~MAC_RX_RXEN_;
1558                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1559         }
1560
1561         /* add 4 to size for FCS */
1562         buf &= ~MAC_RX_MAX_SIZE_MASK_;
1563         buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1564
1565         ret = lan78xx_write_reg(dev, MAC_RX, buf);
1566
1567         if (rxenabled) {
1568                 buf |= MAC_RX_RXEN_;
1569                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1570         }
1571
1572         return 0;
1573 }
1574
/* Unlink (cancel) every URB still pending on queue @q.
 *
 * The queue lock must be dropped around usb_unlink_urb(), so the walk
 * is restarted from the top after each unlink (the "goto found"
 * pattern); entries already marked unlink_start are skipped.  Returns
 * the number of URBs successfully submitted for unlinking.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* find the first entry not yet being unlinked */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
1619
/* ndo_change_mtu: validate the new MTU, program the device's maximum RX
 * frame length, and grow the RX URB size when the new hard MTU no
 * longer fits in the old URBs.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	if (new_mtu > MAX_SINGLE_PACKET_SIZE)
		return -EINVAL;

	if (new_mtu <= 0)
		return -EINVAL;
	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	/* NOTE(review): rx_urb_size is compared against the old hard_mtu,
	 * i.e. only resized when the two were in lockstep (usbnet-style);
	 * confirm this is intentional for bundled-RX configurations.
	 */
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			if (netif_running(dev->net)) {
				/* drop in-flight RX URBs so they get
				 * resubmitted at the new size
				 */
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
1654
1655 int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
1656 {
1657         struct lan78xx_net *dev = netdev_priv(netdev);
1658         struct sockaddr *addr = p;
1659         u32 addr_lo, addr_hi;
1660         int ret;
1661
1662         if (netif_running(netdev))
1663                 return -EBUSY;
1664
1665         if (!is_valid_ether_addr(addr->sa_data))
1666                 return -EADDRNOTAVAIL;
1667
1668         ether_addr_copy(netdev->dev_addr, addr->sa_data);
1669
1670         addr_lo = netdev->dev_addr[0] |
1671                   netdev->dev_addr[1] << 8 |
1672                   netdev->dev_addr[2] << 16 |
1673                   netdev->dev_addr[3] << 24;
1674         addr_hi = netdev->dev_addr[4] |
1675                   netdev->dev_addr[5] << 8;
1676
1677         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1678         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1679
1680         return 0;
1681 }
1682
1683 /* Enable or disable Rx checksum offload engine */
1684 static int lan78xx_set_features(struct net_device *netdev,
1685                                 netdev_features_t features)
1686 {
1687         struct lan78xx_net *dev = netdev_priv(netdev);
1688         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1689         unsigned long flags;
1690         int ret;
1691
1692         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1693
1694         if (features & NETIF_F_RXCSUM) {
1695                 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
1696                 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
1697         } else {
1698                 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
1699                 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
1700         }
1701
1702         if (features & NETIF_F_HW_VLAN_CTAG_RX)
1703                 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
1704         else
1705                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
1706
1707         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1708
1709         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1710
1711         return 0;
1712 }
1713
/* Work handler deferred from the VLAN add/kill callbacks: pushes the
 * shadow VLAN filter table to the device's dataport from a context
 * that may sleep.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
1723
1724 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
1725                                    __be16 proto, u16 vid)
1726 {
1727         struct lan78xx_net *dev = netdev_priv(netdev);
1728         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1729         u16 vid_bit_index;
1730         u16 vid_dword_index;
1731
1732         vid_dword_index = (vid >> 5) & 0x7F;
1733         vid_bit_index = vid & 0x1F;
1734
1735         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
1736
1737         /* defer register writes to a sleepable context */
1738         schedule_work(&pdata->set_vlan);
1739
1740         return 0;
1741 }
1742
1743 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
1744                                     __be16 proto, u16 vid)
1745 {
1746         struct lan78xx_net *dev = netdev_priv(netdev);
1747         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1748         u16 vid_bit_index;
1749         u16 vid_dword_index;
1750
1751         vid_dword_index = (vid >> 5) & 0x7F;
1752         vid_bit_index = vid & 0x1F;
1753
1754         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
1755
1756         /* defer register writes to a sleepable context */
1757         schedule_work(&pdata->set_vlan);
1758
1759         return 0;
1760 }
1761
1762 static void lan78xx_init_ltm(struct lan78xx_net *dev)
1763 {
1764         int ret;
1765         u32 buf;
1766         u32 regs[6] = { 0 };
1767
1768         ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1769         if (buf & USB_CFG1_LTM_ENABLE_) {
1770                 u8 temp[2];
1771                 /* Get values from EEPROM first */
1772                 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
1773                         if (temp[0] == 24) {
1774                                 ret = lan78xx_read_raw_eeprom(dev,
1775                                                               temp[1] * 2,
1776                                                               24,
1777                                                               (u8 *)regs);
1778                                 if (ret < 0)
1779                                         return;
1780                         }
1781                 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
1782                         if (temp[0] == 24) {
1783                                 ret = lan78xx_read_raw_otp(dev,
1784                                                            temp[1] * 2,
1785                                                            24,
1786                                                            (u8 *)regs);
1787                                 if (ret < 0)
1788                                         return;
1789                         }
1790                 }
1791         }
1792
1793         lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
1794         lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
1795         lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
1796         lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
1797         lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
1798         lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
1799 }
1800
/* Soft-reset the LAN78xx and program the datapath to its operational
 * state: MAC address, bulk-in burst/queue sizing, FIFO levels, receive
 * filter, checksum offload, PHY reset and finally the MAC/FCT TX and
 * RX enables.  The register sequence is order-dependent.
 * Returns 0 on success or -EIO when a self-clearing reset bit fails to
 * clear within one second.  Individual register I/O results are not
 * checked (ret is repeatedly overwritten).
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;

	/* issue a "lite" reset and poll until the bit self-clears */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->devid = buf;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* account for the 8-byte TX command header in the hard MTU */
	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* size the bulk-in burst cap and URB queue depths per bus speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* enable multiple ethernet frames per USB transfer (MEF) */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	/* enable the burst cap programmed above */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear any stale interrupts; flow control off by default */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	/* let the MAC track the PHY's negotiated speed and duplex */
	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable PHY interrupts */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	buf |= INT_ENP_PHY_INT;
	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);

	/* enable transmit in MAC, then in the FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable receive in MAC, then in the FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
1933
/* ndo_open: reset and configure the chip, bring up the PHY, start the
 * interrupt URB used for link-change notification, and schedule a
 * deferred link reset.  Returns 0 or a negative errno.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	/* NOTE(review): the reference taken here is dropped at 'done'
	 * even on the success path, while lan78xx_stop() also calls
	 * usb_autopm_put_interface() — confirm the get/put pairing
	 * across open/stop is balanced.
	 */
	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	ret = lan78xx_reset(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto done;

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* link state will be established by the deferred worker */
	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
1974
/* Unlink all in-flight RX/TX URBs and wait, in short uninterruptible
 * slices, for their completions to drain.  Completion handlers wake us
 * through dev->wait.  Called from lan78xx_stop() in process context.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	/* NOTE(review): with '&&' this loop exits as soon as ANY of
	 * rxq/txq/done becomes empty rather than when all are drained;
	 * '||' looks intended — confirm against the usbnet equivalent
	 * before changing.
	 */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
2000
/* ndo_stop: detach and release the PHY, stop the TX queue, drain all
 * URBs and deferred work, then drop the autopm reference.  The
 * teardown order matters: the PHY goes first, URBs are terminated
 * before the workers are neutered.  Always returns 0.
 */
int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net		*dev = netdev_priv(net);

	phy_stop(net->phydev);
	phy_disconnect(net->phydev);
	net->phydev = NULL;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;		/* clearing every flag makes workers no-ops */
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2035
2036 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2037                                        struct sk_buff *skb, gfp_t flags)
2038 {
2039         u32 tx_cmd_a, tx_cmd_b;
2040
2041         if (skb_cow_head(skb, TX_OVERHEAD)) {
2042                 dev_kfree_skb_any(skb);
2043                 return NULL;
2044         }
2045
2046         if (skb_linearize(skb)) {
2047                 dev_kfree_skb_any(skb);
2048                 return NULL;
2049         }
2050
2051         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2052
2053         if (skb->ip_summed == CHECKSUM_PARTIAL)
2054                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2055
2056         tx_cmd_b = 0;
2057         if (skb_is_gso(skb)) {
2058                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2059
2060                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2061
2062                 tx_cmd_a |= TX_CMD_A_LSO_;
2063         }
2064
2065         if (skb_vlan_tag_present(skb)) {
2066                 tx_cmd_a |= TX_CMD_A_IVTG_;
2067                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2068         }
2069
2070         skb_push(skb, 4);
2071         cpu_to_le32s(&tx_cmd_b);
2072         memcpy(skb->data, &tx_cmd_b, 4);
2073
2074         skb_push(skb, 4);
2075         cpu_to_le32s(&tx_cmd_a);
2076         memcpy(skb->data, &tx_cmd_a, 4);
2077
2078         return skb;
2079 }
2080
/* Move @skb from @list (rxq or txq) to the done list, recording its
 * new lifecycle @state, and schedule the bottom-half tasklet when the
 * done list transitions from empty to one entry.  Returns the state
 * the skb had on entry so callers can detect an unlink in progress.
 *
 * Locking: interrupts stay disabled across both list locks; flags are
 * saved with the source list's lock and restored with the done list's,
 * hence the plain unlock/lock pair in the middle.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2103
/* Bulk-out URB completion handler (interrupt context): account the
 * transfer, react to fatal USB status codes, drop the autopm reference
 * taken at submit time, and hand the skb to the bottom half via the
 * done list.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets++;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* stalled endpoint: clear the halt from the
			 * deferred worker (process context)
			 */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			/* low-level bus problems: stop feeding the device */
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	/* tx_done: the bottom half frees the skb */
	defer_bh(dev, skb, &dev->txq, tx_done);
}
2142
2143 static void lan78xx_queue_skb(struct sk_buff_head *list,
2144                               struct sk_buff *newsk, enum skb_state state)
2145 {
2146         struct skb_data *entry = (struct skb_data *)newsk->cb;
2147
2148         __skb_queue_tail(list, newsk);
2149         entry->state = state;
2150 }
2151
2152 netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2153 {
2154         struct lan78xx_net *dev = netdev_priv(net);
2155         struct sk_buff *skb2 = NULL;
2156
2157         if (skb) {
2158                 skb_tx_timestamp(skb);
2159                 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2160         }
2161
2162         if (skb2) {
2163                 skb_queue_tail(&dev->txq_pend, skb2);
2164
2165                 if (skb_queue_len(&dev->txq_pend) > 10)
2166                         netif_stop_queue(net);
2167         } else {
2168                 netif_dbg(dev, tx_err, dev->net,
2169                           "lan78xx_tx_prep return NULL\n");
2170                 dev->net->stats.tx_errors++;
2171                 dev->net->stats.tx_dropped++;
2172         }
2173
2174         tasklet_schedule(&dev->bh);
2175
2176         return NETDEV_TX_OK;
2177 }
2178
2179 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2180 {
2181         struct lan78xx_priv *pdata = NULL;
2182         int ret;
2183         int i;
2184
2185         dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2186
2187         pdata = (struct lan78xx_priv *)(dev->data[0]);
2188         if (!pdata) {
2189                 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2190                 return -ENOMEM;
2191         }
2192
2193         pdata->dev = dev;
2194
2195         spin_lock_init(&pdata->rfe_ctl_lock);
2196         mutex_init(&pdata->dataport_mutex);
2197
2198         INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2199
2200         for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2201                 pdata->vlan_table[i] = 0;
2202
2203         INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2204
2205         dev->net->features = 0;
2206
2207         if (DEFAULT_TX_CSUM_ENABLE)
2208                 dev->net->features |= NETIF_F_HW_CSUM;
2209
2210         if (DEFAULT_RX_CSUM_ENABLE)
2211                 dev->net->features |= NETIF_F_RXCSUM;
2212
2213         if (DEFAULT_TSO_CSUM_ENABLE)
2214                 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2215
2216         dev->net->hw_features = dev->net->features;
2217
2218         /* Init all registers */
2219         ret = lan78xx_reset(dev);
2220
2221         lan78xx_mdio_init(dev);
2222
2223         dev->net->flags |= IFF_MULTICAST;
2224
2225         pdata->wol = WAKE_MAGIC;
2226
2227         return 0;
2228 }
2229
2230 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2231 {
2232         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2233
2234         lan78xx_remove_mdio(dev);
2235
2236         if (pdata) {
2237                 netif_dbg(dev, ifdown, dev->net, "free pdata");
2238                 kfree(pdata);
2239                 pdata = NULL;
2240                 dev->data[0] = 0;
2241         }
2242 }
2243
2244 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2245                                     struct sk_buff *skb,
2246                                     u32 rx_cmd_a, u32 rx_cmd_b)
2247 {
2248         if (!(dev->net->features & NETIF_F_RXCSUM) ||
2249             unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2250                 skb->ip_summed = CHECKSUM_NONE;
2251         } else {
2252                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2253                 skb->ip_summed = CHECKSUM_COMPLETE;
2254         }
2255 }
2256
2257 void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2258 {
2259         int             status;
2260
2261         if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2262                 skb_queue_tail(&dev->rxq_pause, skb);
2263                 return;
2264         }
2265
2266         skb->protocol = eth_type_trans(skb, dev->net);
2267         dev->net->stats.rx_packets++;
2268         dev->net->stats.rx_bytes += skb->len;
2269
2270         netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2271                   skb->len + sizeof(struct ethhdr), skb->protocol);
2272         memset(skb->cb, 0, sizeof(struct skb_data));
2273
2274         if (skb_defer_rx_timestamp(skb))
2275                 return;
2276
2277         status = netif_rx(skb);
2278         if (status != NET_RX_SUCCESS)
2279                 netif_dbg(dev, rx_err, dev->net,
2280                           "netif_rx status %d\n", status);
2281 }
2282
/* Demultiplex a bulk-in buffer that may carry several frames, each
 * preceded by a 10-byte command header (RX_CMD_A/B/C) and padded so
 * the next header is 4-byte aligned.  Intermediate frames are cloned
 * out of @skb and passed up via lan78xx_skb_return(); the final frame
 * is returned in @skb itself.  Returns 1 on success, 0 on error
 * (the caller counts rx_errors and recycles the buffer).
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* pull the three little-endian command words off the
		 * front; memcpy avoids unaligned loads
		 */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		/* pad so that the following header is 4-byte aligned */
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* hardware flagged a receive error: skip frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				/* caller hands this skb up the stack */
				return 1;
			}

			/* clone shares the data buffer; point the clone
			 * at just this frame's payload
			 */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		/* advance past this frame's payload */
		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
2354
2355 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2356 {
2357         if (!lan78xx_rx(dev, skb)) {
2358                 dev->net->stats.rx_errors++;
2359                 goto done;
2360         }
2361
2362         if (skb->len) {
2363                 lan78xx_skb_return(dev, skb);
2364                 return;
2365         }
2366
2367         netif_dbg(dev, rx_err, dev->net, "drop\n");
2368         dev->net->stats.rx_errors++;
2369 done:
2370         skb_queue_tail(&dev->done, skb);
2371 }
2372
2373 static void rx_complete(struct urb *urb);
2374
/* Allocate an RX skb, bind it to @urb and submit the URB on the
 * bulk-in pipe.  On any failure both the skb and the urb are freed
 * here.  Returns 0 on success, -ENOMEM, -ENOLINK (device not ready /
 * unreachable) or another usb_submit_urb() error.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	/* stash urb/dev in the skb control block for rx_complete() */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* only submit while the interface is up and RX is neither
	 * halted nor suspended
	 */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: clear it in process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* let the bottom half retry later */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
2434
/* Bulk-in URB completion handler (interrupt context).  Classifies the
 * completion status, hands the skb to the bottom half through
 * defer_bh(), and resubmits the URB when the status allows it.  For
 * fatal statuses the urb pointer is parked in the skb's entry (and
 * local urb NULLed) so it is not resubmitted below.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* runt: too short to even hold the RX command headers */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		/* low-level bus errors: clean up, don't resubmit */
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	/* defer_bh() returns the state seen before our update, letting
	 * an in-progress unlink suppress the resubmit below
	 */
	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
2504
/* TX bottom half: coalesce pending non-GSO frames from txq_pend into a
 * single bulk-out buffer (each frame padded to a 4-byte boundary), or
 * send a GSO frame on its own, then allocate and submit the URB.  Runs
 * from the driver tasklet; URBs deferred while the device is asleep
 * are anchored and flushed at resume time.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	/* first pass: count how many queued frames fit in one buffer;
	 * a GSO frame is always sent alone (without copying)
	 */
	skb_totallen = 0;
	pkt_cnt = 0;
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			length = skb->len;
			skb2 = skb_dequeue(tqp);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* previous total is rounded up before this frame is added */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	/* second pass: dequeue the counted frames and pack them in,
	 * each starting on a 4-byte boundary
	 */
	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

	length = skb_totallen;

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	/* on the gso path skb still points at the dequeued frame */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;

	/* keep the device awake for the duration of the transfer */
	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		dev->net->trans_start = jiffies;
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		/* throttle the stack while the hardware queue is full */
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		/* stalled endpoint: clear it from process context */
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
2628
2629 static void lan78xx_rx_bh(struct lan78xx_net *dev)
2630 {
2631         struct urb *urb;
2632         int i;
2633
2634         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
2635                 for (i = 0; i < 10; i++) {
2636                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
2637                                 break;
2638                         urb = usb_alloc_urb(0, GFP_ATOMIC);
2639                         if (urb)
2640                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
2641                                         return;
2642                 }
2643
2644                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
2645                         tasklet_schedule(&dev->bh);
2646         }
2647         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
2648                 netif_wake_queue(dev->net);
2649 }
2650
/* Bottom-half tasklet: drains the "done" queue of completed RX/TX skbs,
 * then (while the interface is present and running) pushes pending TX
 * frames and refills the RX URB ring.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			/* mark rx_cleanup first so that if rx_process()
			 * requeues the skb onto "done", it is freed below
			 */
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* NOTE(review): returning here abandons this skb and
			 * any remaining entries on the "done" queue without
			 * freeing them - verify no path can queue a state
			 * other than the three handled above.
			 */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		/* skip RX refill while throttled (delay timer pending) or
		 * while the RX pipe is halted
		 */
		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
2687
2688 static void lan78xx_delayedwork(struct work_struct *work)
2689 {
2690         int status;
2691         struct lan78xx_net *dev;
2692
2693         dev = container_of(work, struct lan78xx_net, wq.work);
2694
2695         if (test_bit(EVENT_TX_HALT, &dev->flags)) {
2696                 unlink_urbs(dev, &dev->txq);
2697                 status = usb_autopm_get_interface(dev->intf);
2698                 if (status < 0)
2699                         goto fail_pipe;
2700                 status = usb_clear_halt(dev->udev, dev->pipe_out);
2701                 usb_autopm_put_interface(dev->intf);
2702                 if (status < 0 &&
2703                     status != -EPIPE &&
2704                     status != -ESHUTDOWN) {
2705                         if (netif_msg_tx_err(dev))
2706 fail_pipe:
2707                                 netdev_err(dev->net,
2708                                            "can't clear tx halt, status %d\n",
2709                                            status);
2710                 } else {
2711                         clear_bit(EVENT_TX_HALT, &dev->flags);
2712                         if (status != -ESHUTDOWN)
2713                                 netif_wake_queue(dev->net);
2714                 }
2715         }
2716         if (test_bit(EVENT_RX_HALT, &dev->flags)) {
2717                 unlink_urbs(dev, &dev->rxq);
2718                 status = usb_autopm_get_interface(dev->intf);
2719                 if (status < 0)
2720                                 goto fail_halt;
2721                 status = usb_clear_halt(dev->udev, dev->pipe_in);
2722                 usb_autopm_put_interface(dev->intf);
2723                 if (status < 0 &&
2724                     status != -EPIPE &&
2725                     status != -ESHUTDOWN) {
2726                         if (netif_msg_rx_err(dev))
2727 fail_halt:
2728                                 netdev_err(dev->net,
2729                                            "can't clear rx halt, status %d\n",
2730                                            status);
2731                 } else {
2732                         clear_bit(EVENT_RX_HALT, &dev->flags);
2733                         tasklet_schedule(&dev->bh);
2734                 }
2735         }
2736
2737         if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
2738                 int ret = 0;
2739
2740                 clear_bit(EVENT_LINK_RESET, &dev->flags);
2741                 status = usb_autopm_get_interface(dev->intf);
2742                 if (status < 0)
2743                         goto skip_reset;
2744                 if (lan78xx_link_reset(dev) < 0) {
2745                         usb_autopm_put_interface(dev->intf);
2746 skip_reset:
2747                         netdev_info(dev->net, "link reset failed (%d)\n",
2748                                     ret);
2749                 } else {
2750                         usb_autopm_put_interface(dev->intf);
2751                 }
2752         }
2753 }
2754
/* Completion callback for the interrupt-in URB carrying device status.
 * On success the status payload is processed and the URB is resubmitted;
 * shutdown-type errors stop the polling loop.
 */
static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:                   /* urb killed */
	case -ESHUTDOWN:                /* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	/* don't resubmit once the interface has been taken down */
	if (!netif_running(dev->net))
		return;

	/* clear stale status before the next poll */
	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status != 0)
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
}
2790
/* USB disconnect handler: unregisters the netdev, flushes deferred work
 * and anchored URBs, unbinds driver state and drops the device reference
 * taken in probe.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net              *dev;
	struct usb_device               *udev;
	struct net_device               *net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);

	net = dev->net;
	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* drop TX URBs parked on the deferred anchor during autosuspend */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	/* NOTE(review): the interrupt URB is killed only after unbind;
	 * confirm intr_complete cannot touch freed driver state here.
	 */
	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);	/* pairs with usb_get_dev() in probe */
}
2819
/* netdev watchdog callback: unlink all in-flight TX URBs and let the
 * bottom-half tasklet restart transmission.
 */
void lan78xx_tx_timeout(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}
2827
2828 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
2829                                                 struct net_device *netdev,
2830                                                 netdev_features_t features)
2831 {
2832         if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
2833                 features &= ~NETIF_F_GSO_MASK;
2834
2835         features = vlan_features_check(skb, features);
2836         features = vxlan_features_check(skb, features);
2837
2838         return features;
2839 }
2840
/* net_device operations installed on the netdev in lan78xx_probe() */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
2856
2857 static int lan78xx_probe(struct usb_interface *intf,
2858                          const struct usb_device_id *id)
2859 {
2860         struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
2861         struct lan78xx_net *dev;
2862         struct net_device *netdev;
2863         struct usb_device *udev;
2864         int ret;
2865         unsigned maxp;
2866         unsigned period;
2867         u8 *buf = NULL;
2868
2869         udev = interface_to_usbdev(intf);
2870         udev = usb_get_dev(udev);
2871
2872         ret = -ENOMEM;
2873         netdev = alloc_etherdev(sizeof(struct lan78xx_net));
2874         if (!netdev) {
2875                         dev_err(&intf->dev, "Error: OOM\n");
2876                         goto out1;
2877         }
2878
2879         /* netdev_printk() needs this */
2880         SET_NETDEV_DEV(netdev, &intf->dev);
2881
2882         dev = netdev_priv(netdev);
2883         dev->udev = udev;
2884         dev->intf = intf;
2885         dev->net = netdev;
2886         dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
2887                                         | NETIF_MSG_PROBE | NETIF_MSG_LINK);
2888
2889         skb_queue_head_init(&dev->rxq);
2890         skb_queue_head_init(&dev->txq);
2891         skb_queue_head_init(&dev->done);
2892         skb_queue_head_init(&dev->rxq_pause);
2893         skb_queue_head_init(&dev->txq_pend);
2894         mutex_init(&dev->phy_mutex);
2895
2896         tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
2897         INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
2898         init_usb_anchor(&dev->deferred);
2899
2900         netdev->netdev_ops = &lan78xx_netdev_ops;
2901         netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
2902         netdev->ethtool_ops = &lan78xx_ethtool_ops;
2903
2904         if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
2905                 ret = -ENODEV;
2906                 goto out2;
2907         }
2908
2909         dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
2910         ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
2911         if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
2912                 ret = -ENODEV;
2913                 goto out2;
2914         }
2915
2916         dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
2917         ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
2918         if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
2919                 ret = -ENODEV;
2920                 goto out2;
2921         }
2922
2923         ep_intr = &intf->cur_altsetting->endpoint[2];
2924         if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
2925                 ret = -ENODEV;
2926                 goto out2;
2927         }
2928
2929         dev->pipe_intr = usb_rcvintpipe(dev->udev,
2930                                         usb_endpoint_num(&ep_intr->desc));
2931
2932         ret = lan78xx_bind(dev, intf);
2933         if (ret < 0)
2934                 goto out2;
2935         strcpy(netdev->name, "eth%d");
2936
2937         if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
2938                 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
2939         netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
2940
2941         period = ep_intr->desc.bInterval;
2942         maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
2943         buf = kmalloc(maxp, GFP_KERNEL);
2944         if (buf) {
2945                 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
2946                 if (!dev->urb_intr) {
2947                         kfree(buf);
2948                         goto out3;
2949                 } else {
2950                         usb_fill_int_urb(dev->urb_intr, dev->udev,
2951                                          dev->pipe_intr, buf, maxp,
2952                                          intr_complete, dev, period);
2953                         dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
2954                 }
2955         }
2956
2957         dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
2958
2959         /* driver requires remote-wakeup capability during autosuspend. */
2960         intf->needs_remote_wakeup = 1;
2961
2962         ret = register_netdev(netdev);
2963         if (ret != 0) {
2964                 netif_err(dev, probe, netdev, "couldn't register the device\n");
2965                 goto out2;
2966         }
2967
2968         usb_set_intfdata(intf, dev);
2969
2970         ret = device_set_wakeup_enable(&udev->dev, true);
2971
2972          /* Default delay of 2sec has more overhead than advantage.
2973           * Set to 10sec as default.
2974           */
2975         pm_runtime_set_autosuspend_delay(&udev->dev,
2976                                          DEFAULT_AUTOSUSPEND_DELAY);
2977
2978         return 0;
2979
2980 out3:
2981         lan78xx_unbind(dev, intf);
2982 out2:
2983         free_netdev(netdev);
2984 out1:
2985         usb_put_dev(udev);
2986
2987         return ret;
2988 }
2989
/* Compute the CRC16 (polynomial 0x8005, data consumed LSB first, seed
 * 0xFFFF) that the LAN78xx wakeup-frame filter expects in WUF_CFGX.
 * After each polynomial XOR the low bit is forced to 1, mirroring the
 * original implementation.
 */
static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
{
	const u16 crc16poly = 0x8005;
	u16 crc = 0xFFFF;
	int idx;

	for (idx = 0; idx < len; idx++) {
		u8 data = buf[idx];
		int bit;

		for (bit = 0; bit < 8; bit++) {
			u16 msb = crc >> 15;

			crc <<= 1;
			if (msb ^ (u16)(data & 1)) {
				crc ^= crc16poly;
				crc |= (u16)0x0001U;
			}
			data >>= 1;
		}
	}

	return crc;
}
3014
/* Program the chip's wake-on-LAN machinery for system suspend according
 * to the ethtool WOL flags in @wol: stops TX/RX, clears stale wake
 * status, installs wakeup-frame filters as needed, then selects the
 * suspend mode in PMT_CTL and re-enables RX so wake frames are seen.
 *
 * NOTE(review): every lan78xx_read_reg/write_reg return value is
 * assigned to ret but never checked; the function always returns 0.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* OUI prefixes matched by the multicast wakeup-frame filters */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* stop the MAC transmitter and receiver while reconfiguring */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear wake control/status and any latched wake sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable all wakeup-frame filters before selectively enabling */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x7: match the first 3 bytes (OUI) of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3: match the first 2 bytes (33:33) of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask bits 12-13: the EtherType field */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable RX so wake frames can be received while suspended */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3157
/* USB suspend handler.  On the first suspend level it quiesces the MAC,
 * detaches the netdev and kills outstanding URBs; then it arms the wake
 * configuration - good-frame wake for autosuspend, ethtool WOL settings
 * for system suspend.  Refuses autosuspend while TX work is pending.
 *
 * NOTE(review): register access results assigned to ret are not
 * checked; ret is forced to 0 before the out label.
 */
int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
			PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			/* start_xmit will now defer URBs to dev->deferred */
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			/* clear wake status and latched wake sources */
			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			/* wake on PHY event or any received good frame */
			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* clear any pending wakeup status bits */
			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* keep RX enabled so wake frames are detected */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			/* system suspend: honour the ethtool WOL settings */
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
3251
/* USB resume handler.  On the last resume level it restarts the
 * interrupt URB, submits TX URBs deferred while asleep and wakes the
 * queue; it then clears the wake configuration, re-arms the WOL status
 * bits and re-enables the MAC transmitter.
 *
 * NOTE(review): submission/register results stored in ret are ignored
 * and the function always returns 0 - confirm that is intentional.
 */
int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
				usb_submit_urb(dev->urb_intr, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		/* flush TX URBs deferred in lan78xx_tx_bh while asleep */
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				/* balance the autopm ref taken at submit */
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* clear wake settings and latched wake sources */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	/* write-1-to-clear the per-event wake status bits */
	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the MAC transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
3312
/* Resume after a bus reset: the chip lost its state, so perform a full
 * hardware reset and PHY re-init before the normal resume path.
 */
int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);

	lan78xx_phy_init(dev);

	return lan78xx_resume(intf);
}
3323
/* USB vendor/product IDs handled by this driver */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
3336
/* USB driver glue: autosuspend is supported and hub-initiated LPM is
 * disabled (the device requires remote wakeup while autosuspended).
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");