GNU Linux-libre 5.10.215-gnu1
[releases.git] / drivers / net / usb / lan78xx.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/version.h>
6 #include <linux/module.h>
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/ethtool.h>
10 #include <linux/usb.h>
11 #include <linux/crc32.h>
12 #include <linux/signal.h>
13 #include <linux/slab.h>
14 #include <linux/if_vlan.h>
15 #include <linux/uaccess.h>
16 #include <linux/linkmode.h>
17 #include <linux/list.h>
18 #include <linux/ip.h>
19 #include <linux/ipv6.h>
20 #include <linux/mdio.h>
21 #include <linux/phy.h>
22 #include <net/ip6_checksum.h>
23 #include <net/vxlan.h>
24 #include <linux/interrupt.h>
25 #include <linux/irqdomain.h>
26 #include <linux/irq.h>
27 #include <linux/irqchip/chained_irq.h>
28 #include <linux/microchipphy.h>
29 #include <linux/phy_fixed.h>
30 #include <linux/of_mdio.h>
31 #include <linux/of_net.h>
32 #include "lan78xx.h"
33
34 #define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
35 #define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
36 #define DRIVER_NAME     "lan78xx"
37
38 #define TX_TIMEOUT_JIFFIES              (5 * HZ)
39 #define THROTTLE_JIFFIES                (HZ / 8)
40 #define UNLINK_TIMEOUT_MS               3
41
42 #define RX_MAX_QUEUE_MEMORY             (60 * 1518)
43
44 #define SS_USB_PKT_SIZE                 (1024)
45 #define HS_USB_PKT_SIZE                 (512)
46 #define FS_USB_PKT_SIZE                 (64)
47
48 #define MAX_RX_FIFO_SIZE                (12 * 1024)
49 #define MAX_TX_FIFO_SIZE                (12 * 1024)
50 #define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
51 #define DEFAULT_BULK_IN_DELAY           (0x0800)
52 #define MAX_SINGLE_PACKET_SIZE          (9000)
53 #define DEFAULT_TX_CSUM_ENABLE          (true)
54 #define DEFAULT_RX_CSUM_ENABLE          (true)
55 #define DEFAULT_TSO_CSUM_ENABLE         (true)
56 #define DEFAULT_VLAN_FILTER_ENABLE      (true)
57 #define DEFAULT_VLAN_RX_OFFLOAD         (true)
58 #define TX_OVERHEAD                     (8)
59 #define RXW_PADDING                     2
60
61 #define LAN78XX_USB_VENDOR_ID           (0x0424)
62 #define LAN7800_USB_PRODUCT_ID          (0x7800)
63 #define LAN7850_USB_PRODUCT_ID          (0x7850)
64 #define LAN7801_USB_PRODUCT_ID          (0x7801)
65 #define LAN78XX_EEPROM_MAGIC            (0x78A5)
66 #define LAN78XX_OTP_MAGIC               (0x78F3)
67 #define AT29M2AF_USB_VENDOR_ID          (0x07C9)
68 #define AT29M2AF_USB_PRODUCT_ID         (0x0012)
69
70 #define MII_READ                        1
71 #define MII_WRITE                       0
72
73 #define EEPROM_INDICATOR                (0xA5)
74 #define EEPROM_MAC_OFFSET               (0x01)
75 #define MAX_EEPROM_SIZE                 512
76 #define OTP_INDICATOR_1                 (0xF3)
77 #define OTP_INDICATOR_2                 (0xF7)
78
79 #define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
80                                          WAKE_MCAST | WAKE_BCAST | \
81                                          WAKE_ARP | WAKE_MAGIC)
82
83 /* USB related defines */
84 #define BULK_IN_PIPE                    1
85 #define BULK_OUT_PIPE                   2
86
87 /* default autosuspend delay (mSec) */
88 #define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
89
90 /* statistic update interval (mSec) */
91 #define STAT_UPDATE_TIMER               (1 * 1000)
92
93 /* time to wait for MAC or FCT to stop (jiffies) */
94 #define HW_DISABLE_TIMEOUT              (HZ / 10)
95
96 /* time to wait between polling MAC or FCT state (ms) */
97 #define HW_DISABLE_DELAY_MS             1
98
99 /* defines interrupts from interrupt EP */
100 #define MAX_INT_EP                      (32)
101 #define INT_EP_INTEP                    (31)
102 #define INT_EP_OTP_WR_DONE              (28)
103 #define INT_EP_EEE_TX_LPI_START         (26)
104 #define INT_EP_EEE_TX_LPI_STOP          (25)
105 #define INT_EP_EEE_RX_LPI               (24)
106 #define INT_EP_MAC_RESET_TIMEOUT        (23)
107 #define INT_EP_RDFO                     (22)
108 #define INT_EP_TXE                      (21)
109 #define INT_EP_USB_STATUS               (20)
110 #define INT_EP_TX_DIS                   (19)
111 #define INT_EP_RX_DIS                   (18)
112 #define INT_EP_PHY                      (17)
113 #define INT_EP_DP                       (16)
114 #define INT_EP_MAC_ERR                  (15)
115 #define INT_EP_TDFU                     (14)
116 #define INT_EP_TDFO                     (13)
117 #define INT_EP_UTX                      (12)
118 #define INT_EP_GPIO_11                  (11)
119 #define INT_EP_GPIO_10                  (10)
120 #define INT_EP_GPIO_9                   (9)
121 #define INT_EP_GPIO_8                   (8)
122 #define INT_EP_GPIO_7                   (7)
123 #define INT_EP_GPIO_6                   (6)
124 #define INT_EP_GPIO_5                   (5)
125 #define INT_EP_GPIO_4                   (4)
126 #define INT_EP_GPIO_3                   (3)
127 #define INT_EP_GPIO_2                   (2)
128 #define INT_EP_GPIO_1                   (1)
129 #define INT_EP_GPIO_0                   (0)
130
131 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
132         "RX FCS Errors",
133         "RX Alignment Errors",
134         "RX Fragment Errors",
135         "RX Jabber Errors",
136         "RX Undersize Frame Errors",
137         "RX Oversize Frame Errors",
138         "RX Dropped Frames",
139         "RX Unicast Byte Count",
140         "RX Broadcast Byte Count",
141         "RX Multicast Byte Count",
142         "RX Unicast Frames",
143         "RX Broadcast Frames",
144         "RX Multicast Frames",
145         "RX Pause Frames",
146         "RX 64 Byte Frames",
147         "RX 65 - 127 Byte Frames",
148         "RX 128 - 255 Byte Frames",
149         "RX 256 - 511 Bytes Frames",
150         "RX 512 - 1023 Byte Frames",
151         "RX 1024 - 1518 Byte Frames",
152         "RX Greater 1518 Byte Frames",
153         "EEE RX LPI Transitions",
154         "EEE RX LPI Time",
155         "TX FCS Errors",
156         "TX Excess Deferral Errors",
157         "TX Carrier Errors",
158         "TX Bad Byte Count",
159         "TX Single Collisions",
160         "TX Multiple Collisions",
161         "TX Excessive Collision",
162         "TX Late Collisions",
163         "TX Unicast Byte Count",
164         "TX Broadcast Byte Count",
165         "TX Multicast Byte Count",
166         "TX Unicast Frames",
167         "TX Broadcast Frames",
168         "TX Multicast Frames",
169         "TX Pause Frames",
170         "TX 64 Byte Frames",
171         "TX 65 - 127 Byte Frames",
172         "TX 128 - 255 Byte Frames",
173         "TX 256 - 511 Bytes Frames",
174         "TX 512 - 1023 Byte Frames",
175         "TX 1024 - 1518 Byte Frames",
176         "TX Greater 1518 Byte Frames",
177         "EEE TX LPI Transitions",
178         "EEE TX LPI Time",
179 };
180
181 struct lan78xx_statstage {
182         u32 rx_fcs_errors;
183         u32 rx_alignment_errors;
184         u32 rx_fragment_errors;
185         u32 rx_jabber_errors;
186         u32 rx_undersize_frame_errors;
187         u32 rx_oversize_frame_errors;
188         u32 rx_dropped_frames;
189         u32 rx_unicast_byte_count;
190         u32 rx_broadcast_byte_count;
191         u32 rx_multicast_byte_count;
192         u32 rx_unicast_frames;
193         u32 rx_broadcast_frames;
194         u32 rx_multicast_frames;
195         u32 rx_pause_frames;
196         u32 rx_64_byte_frames;
197         u32 rx_65_127_byte_frames;
198         u32 rx_128_255_byte_frames;
199         u32 rx_256_511_bytes_frames;
200         u32 rx_512_1023_byte_frames;
201         u32 rx_1024_1518_byte_frames;
202         u32 rx_greater_1518_byte_frames;
203         u32 eee_rx_lpi_transitions;
204         u32 eee_rx_lpi_time;
205         u32 tx_fcs_errors;
206         u32 tx_excess_deferral_errors;
207         u32 tx_carrier_errors;
208         u32 tx_bad_byte_count;
209         u32 tx_single_collisions;
210         u32 tx_multiple_collisions;
211         u32 tx_excessive_collision;
212         u32 tx_late_collisions;
213         u32 tx_unicast_byte_count;
214         u32 tx_broadcast_byte_count;
215         u32 tx_multicast_byte_count;
216         u32 tx_unicast_frames;
217         u32 tx_broadcast_frames;
218         u32 tx_multicast_frames;
219         u32 tx_pause_frames;
220         u32 tx_64_byte_frames;
221         u32 tx_65_127_byte_frames;
222         u32 tx_128_255_byte_frames;
223         u32 tx_256_511_bytes_frames;
224         u32 tx_512_1023_byte_frames;
225         u32 tx_1024_1518_byte_frames;
226         u32 tx_greater_1518_byte_frames;
227         u32 eee_tx_lpi_transitions;
228         u32 eee_tx_lpi_time;
229 };
230
231 struct lan78xx_statstage64 {
232         u64 rx_fcs_errors;
233         u64 rx_alignment_errors;
234         u64 rx_fragment_errors;
235         u64 rx_jabber_errors;
236         u64 rx_undersize_frame_errors;
237         u64 rx_oversize_frame_errors;
238         u64 rx_dropped_frames;
239         u64 rx_unicast_byte_count;
240         u64 rx_broadcast_byte_count;
241         u64 rx_multicast_byte_count;
242         u64 rx_unicast_frames;
243         u64 rx_broadcast_frames;
244         u64 rx_multicast_frames;
245         u64 rx_pause_frames;
246         u64 rx_64_byte_frames;
247         u64 rx_65_127_byte_frames;
248         u64 rx_128_255_byte_frames;
249         u64 rx_256_511_bytes_frames;
250         u64 rx_512_1023_byte_frames;
251         u64 rx_1024_1518_byte_frames;
252         u64 rx_greater_1518_byte_frames;
253         u64 eee_rx_lpi_transitions;
254         u64 eee_rx_lpi_time;
255         u64 tx_fcs_errors;
256         u64 tx_excess_deferral_errors;
257         u64 tx_carrier_errors;
258         u64 tx_bad_byte_count;
259         u64 tx_single_collisions;
260         u64 tx_multiple_collisions;
261         u64 tx_excessive_collision;
262         u64 tx_late_collisions;
263         u64 tx_unicast_byte_count;
264         u64 tx_broadcast_byte_count;
265         u64 tx_multicast_byte_count;
266         u64 tx_unicast_frames;
267         u64 tx_broadcast_frames;
268         u64 tx_multicast_frames;
269         u64 tx_pause_frames;
270         u64 tx_64_byte_frames;
271         u64 tx_65_127_byte_frames;
272         u64 tx_128_255_byte_frames;
273         u64 tx_256_511_bytes_frames;
274         u64 tx_512_1023_byte_frames;
275         u64 tx_1024_1518_byte_frames;
276         u64 tx_greater_1518_byte_frames;
277         u64 eee_tx_lpi_transitions;
278         u64 eee_tx_lpi_time;
279 };
280
281 static u32 lan78xx_regs[] = {
282         ID_REV,
283         INT_STS,
284         HW_CFG,
285         PMT_CTL,
286         E2P_CMD,
287         E2P_DATA,
288         USB_STATUS,
289         VLAN_TYPE,
290         MAC_CR,
291         MAC_RX,
292         MAC_TX,
293         FLOW,
294         ERR_STS,
295         MII_ACC,
296         MII_DATA,
297         EEE_TX_LPI_REQ_DLY,
298         EEE_TW_TX_SYS,
299         EEE_TX_LPI_REM_DLY,
300         WUCSR
301 };
302
303 #define PHY_REG_SIZE (32 * sizeof(u32))
304
305 struct lan78xx_net;
306
307 struct lan78xx_priv {
308         struct lan78xx_net *dev;
309         u32 rfe_ctl;
310         u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
311         u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
312         u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
313         struct mutex dataport_mutex; /* for dataport access */
314         spinlock_t rfe_ctl_lock; /* for rfe register access */
315         struct work_struct set_multicast;
316         struct work_struct set_vlan;
317         u32 wol;
318 };
319
320 enum skb_state {
321         illegal = 0,
322         tx_start,
323         tx_done,
324         rx_start,
325         rx_done,
326         rx_cleanup,
327         unlink_start
328 };
329
330 struct skb_data {               /* skb->cb is one of these */
331         struct urb *urb;
332         struct lan78xx_net *dev;
333         enum skb_state state;
334         size_t length;
335         int num_of_packet;
336 };
337
338 struct usb_context {
339         struct usb_ctrlrequest req;
340         struct lan78xx_net *dev;
341 };
342
343 #define EVENT_TX_HALT                   0
344 #define EVENT_RX_HALT                   1
345 #define EVENT_RX_MEMORY                 2
346 #define EVENT_STS_SPLIT                 3
347 #define EVENT_LINK_RESET                4
348 #define EVENT_RX_PAUSED                 5
349 #define EVENT_DEV_WAKING                6
350 #define EVENT_DEV_ASLEEP                7
351 #define EVENT_DEV_OPEN                  8
352 #define EVENT_STAT_UPDATE               9
353
354 struct statstage {
355         struct mutex                    access_lock;    /* for stats access */
356         struct lan78xx_statstage        saved;
357         struct lan78xx_statstage        rollover_count;
358         struct lan78xx_statstage        rollover_max;
359         struct lan78xx_statstage64      curr_stat;
360 };
361
362 struct irq_domain_data {
363         struct irq_domain       *irqdomain;
364         unsigned int            phyirq;
365         struct irq_chip         *irqchip;
366         irq_flow_handler_t      irq_handler;
367         u32                     irqenable;
368         struct mutex            irq_lock;               /* for irq bus access */
369 };
370
371 struct lan78xx_net {
372         struct net_device       *net;
373         struct usb_device       *udev;
374         struct usb_interface    *intf;
375         void                    *driver_priv;
376
377         int                     rx_qlen;
378         int                     tx_qlen;
379         struct sk_buff_head     rxq;
380         struct sk_buff_head     txq;
381         struct sk_buff_head     done;
382         struct sk_buff_head     rxq_pause;
383         struct sk_buff_head     txq_pend;
384
385         struct tasklet_struct   bh;
386         struct delayed_work     wq;
387
388         int                     msg_enable;
389
390         struct urb              *urb_intr;
391         struct usb_anchor       deferred;
392
393         struct mutex            dev_mutex; /* serialise open/stop wrt suspend/resume */
394         struct mutex            phy_mutex; /* for phy access */
395         unsigned int            pipe_in, pipe_out, pipe_intr;
396
397         u32                     hard_mtu;       /* count any extra framing */
398         size_t                  rx_urb_size;    /* size for rx urbs */
399
400         unsigned long           flags;
401
402         wait_queue_head_t       *wait;
403         unsigned char           suspend_count;
404
405         unsigned int            maxpacket;
406         struct timer_list       delay;
407         struct timer_list       stat_monitor;
408
409         unsigned long           data[5];
410
411         int                     link_on;
412         u8                      mdix_ctrl;
413
414         u32                     chipid;
415         u32                     chiprev;
416         struct mii_bus          *mdiobus;
417         phy_interface_t         interface;
418
419         int                     fc_autoneg;
420         u8                      fc_request_control;
421
422         int                     delta;
423         struct statstage        stats;
424
425         struct irq_domain_data  domain_data;
426 };
427
428 /* define external phy id */
429 #define PHY_LAN8835                     (0x0007C130)
430 #define PHY_KSZ9031RNX                  (0x00221620)
431
432 /* use ethtool to change the level for any given device */
433 static int msg_level = -1;
434 module_param(msg_level, int, 0);
435 MODULE_PARM_DESC(msg_level, "Override default message level");
436
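/* Read a 32-bit device register over the USB control pipe. The value is
 * transferred little-endian and converted to CPU byte order. A kmalloc'd
 * buffer is used because usb_control_msg() requires DMA-able memory.
 */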
437 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
438 {
439         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
440         int ret;
441
442         if (!buf)
443                 return -ENOMEM;
444
445         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
446                               USB_VENDOR_REQUEST_READ_REGISTER,
447                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
448                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
449         if (likely(ret >= 0)) {
450                 le32_to_cpus(buf);
451                 *data = *buf;
452         } else {
453                 netdev_warn(dev->net,
454                             "Failed to read register index 0x%08x. ret = %d",
455                             index, ret);
456         }
457
458         kfree(buf);
459
460         return ret;
461 }
462
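/* Write a 32-bit device register over the USB control pipe, converting the
 * value to little-endian first. Returns a negative errno on failure.
 */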
463 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
464 {
465         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
466         int ret;
467
468         if (!buf)
469                 return -ENOMEM;
470
471         *buf = data;
472         cpu_to_le32s(buf);
473
474         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
475                               USB_VENDOR_REQUEST_WRITE_REGISTER,
476                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
477                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
478         if (unlikely(ret < 0)) {
479                 netdev_warn(dev->net,
480                             "Failed to write register index 0x%08x. ret = %d",
481                             index, ret);
482         }
483
484         kfree(buf);
485
486         return ret;
487 }
488
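/* Read-modify-write helper: clears the bits in @mask and sets the
 * corresponding bits from @data, leaving all other bits untouched.
 */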
489 static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
490                               u32 data)
491 {
492         int ret;
493         u32 buf;
494
495         ret = lan78xx_read_reg(dev, reg, &buf);
496         if (ret < 0)
497                 return ret;
498
499         buf &= ~mask;
500         buf |= (mask & data);
501
502         ret = lan78xx_write_reg(dev, reg, buf);
503         if (ret < 0)
504                 return ret;
505
506         return 0;
507 }
508
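/* Fetch the whole hardware statistics block with a single vendor control
 * request and copy it into @data, converting each counter from
 * little-endian to CPU byte order.
 */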
509 static int lan78xx_read_stats(struct lan78xx_net *dev,
510                               struct lan78xx_statstage *data)
511 {
512         int ret = 0;
513         int i;
514         struct lan78xx_statstage *stats;
515         u32 *src;
516         u32 *dst;
517
518         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
519         if (!stats)
520                 return -ENOMEM;
521
522         ret = usb_control_msg(dev->udev,
523                               usb_rcvctrlpipe(dev->udev, 0),
524                               USB_VENDOR_REQUEST_GET_STATS,
525                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
526                               0,
527                               0,
528                               (void *)stats,
529                               sizeof(*stats),
530                               USB_CTRL_SET_TIMEOUT);
531         if (likely(ret >= 0)) {
532                 src = (u32 *)stats;
533                 dst = (u32 *)data;
534                 for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
535                         le32_to_cpus(&src[i]);
536                         dst[i] = src[i];
537                 }
538         } else {
539                 netdev_warn(dev->net,
540                             "Failed to read stat ret = %d", ret);
541         }
542
543         kfree(stats);
544
545         return ret;
546 }
547
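/* The hardware statistics counters are 32 bits wide and wrap around. A new
 * reading that is smaller than the previously saved one indicates a wrap,
 * which is tracked in rollover_count so that 64-bit totals can be rebuilt.
 */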
548 #define check_counter_rollover(struct1, dev_stats, member)              \
549         do {                                                            \
550                 if ((struct1)->member < (dev_stats).saved.member)       \
551                         (dev_stats).rollover_count.member++;            \
552         } while (0)
553
554 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
555                                         struct lan78xx_statstage *stats)
556 {
557         check_counter_rollover(stats, dev->stats, rx_fcs_errors);
558         check_counter_rollover(stats, dev->stats, rx_alignment_errors);
559         check_counter_rollover(stats, dev->stats, rx_fragment_errors);
560         check_counter_rollover(stats, dev->stats, rx_jabber_errors);
561         check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
562         check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
563         check_counter_rollover(stats, dev->stats, rx_dropped_frames);
564         check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
565         check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
566         check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
567         check_counter_rollover(stats, dev->stats, rx_unicast_frames);
568         check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
569         check_counter_rollover(stats, dev->stats, rx_multicast_frames);
570         check_counter_rollover(stats, dev->stats, rx_pause_frames);
571         check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
572         check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
573         check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
574         check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
575         check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
576         check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
577         check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
578         check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
579         check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
580         check_counter_rollover(stats, dev->stats, tx_fcs_errors);
581         check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
582         check_counter_rollover(stats, dev->stats, tx_carrier_errors);
583         check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
584         check_counter_rollover(stats, dev->stats, tx_single_collisions);
585         check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
586         check_counter_rollover(stats, dev->stats, tx_excessive_collision);
587         check_counter_rollover(stats, dev->stats, tx_late_collisions);
588         check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
589         check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
590         check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
591         check_counter_rollover(stats, dev->stats, tx_unicast_frames);
592         check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
593         check_counter_rollover(stats, dev->stats, tx_multicast_frames);
594         check_counter_rollover(stats, dev->stats, tx_pause_frames);
595         check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
596         check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
597         check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
598         check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
599         check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
600         check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
601         check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
602         check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
603         check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
604
605         memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
606 }
607
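/* Refresh the 64-bit statistics: read the current hardware counters, record
 * any rollovers, and rebuild each total as
 * value + rollover_count * (rollover_max + 1) under the stats lock.
 */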
608 static void lan78xx_update_stats(struct lan78xx_net *dev)
609 {
610         u32 *p, *count, *max;
611         u64 *data;
612         int i;
613         struct lan78xx_statstage lan78xx_stats;
614
615         if (usb_autopm_get_interface(dev->intf) < 0)
616                 return;
617
618         p = (u32 *)&lan78xx_stats;
619         count = (u32 *)&dev->stats.rollover_count;
620         max = (u32 *)&dev->stats.rollover_max;
621         data = (u64 *)&dev->stats.curr_stat;
622
623         mutex_lock(&dev->stats.access_lock);
624
625         if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
626                 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
627
628         for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
629                 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
630
631         mutex_unlock(&dev->stats.access_lock);
632
633         usb_autopm_put_interface(dev->intf);
634 }
635
636 /* Loop until the read is completed, with timeout. Called with phy_mutex held. */
637 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
638 {
639         unsigned long start_time = jiffies;
640         u32 val;
641         int ret;
642
643         do {
644                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
645                 if (unlikely(ret < 0))
646                         return -EIO;
647
648                 if (!(val & MII_ACC_MII_BUSY_))
649                         return 0;
650         } while (!time_after(jiffies, start_time + HZ));
651
652         return -EIO;
653 }
654
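/* Compose an MII_ACC register value from the PHY address, register index
 * and transfer direction; the BUSY bit starts the MII transaction.
 */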
655 static inline u32 mii_access(int id, int index, int read)
656 {
657         u32 ret;
658
659         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
660         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
661         if (read)
662                 ret |= MII_ACC_MII_READ_;
663         else
664                 ret |= MII_ACC_MII_WRITE_;
665         ret |= MII_ACC_MII_BUSY_;
666
667         return ret;
668 }
669
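/* Poll E2P_CMD until the EEPROM controller clears its BUSY bit or reports a
 * timeout, waiting up to roughly one second.
 */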
670 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
671 {
672         unsigned long start_time = jiffies;
673         u32 val;
674         int ret;
675
676         do {
677                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
678                 if (unlikely(ret < 0))
679                         return -EIO;
680
681                 if (!(val & E2P_CMD_EPC_BUSY_) ||
682                     (val & E2P_CMD_EPC_TIMEOUT_))
683                         break;
684                 usleep_range(40, 100);
685         } while (!time_after(jiffies, start_time + HZ));
686
687         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
688                 netdev_warn(dev->net, "EEPROM operation timeout");
689                 return -EIO;
690         }
691
692         return 0;
693 }
694
695 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
696 {
697         unsigned long start_time = jiffies;
698         u32 val;
699         int ret;
700
701         do {
702                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
703                 if (unlikely(ret < 0))
704                         return -EIO;
705
706                 if (!(val & E2P_CMD_EPC_BUSY_))
707                         return 0;
708
709                 usleep_range(40, 100);
710         } while (!time_after(jiffies, start_time + HZ));
711
712         netdev_warn(dev->net, "EEPROM is busy");
713         return -EIO;
714 }
715
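/* Read @length bytes from the EEPROM, one E2P_CMD read command per byte.
 * On LAN7800 the EEPROM pins are shared with the LEDs, so the LED outputs
 * are disabled in HW_CFG for the duration and restored afterwards.
 */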
716 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
717                                    u32 length, u8 *data)
718 {
719         u32 val;
720         u32 saved;
721         int i, ret;
722         int retval;
723
724         /* Depending on the chip, some EEPROM pins are muxed with the LED
725          * function; disable and later restore it to access the EEPROM.
726          */
727         ret = lan78xx_read_reg(dev, HW_CFG, &val);
728         saved = val;
729         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
730                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
731                 ret = lan78xx_write_reg(dev, HW_CFG, val);
732         }
733
734         retval = lan78xx_eeprom_confirm_not_busy(dev);
735         if (retval)
736                 return retval;
737
738         for (i = 0; i < length; i++) {
739                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
740                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
741                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
742                 if (unlikely(ret < 0)) {
743                         retval = -EIO;
744                         goto exit;
745                 }
746
747                 retval = lan78xx_wait_eeprom(dev);
748                 if (retval < 0)
749                         goto exit;
750
751                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
752                 if (unlikely(ret < 0)) {
753                         retval = -EIO;
754                         goto exit;
755                 }
756
757                 data[i] = val & 0xFF;
758                 offset++;
759         }
760
761         retval = 0;
762 exit:
763         if (dev->chipid == ID_REV_CHIP_ID_7800_)
764                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
765
766         return retval;
767 }
768
769 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
770                                u32 length, u8 *data)
771 {
772         u8 sig;
773         int ret;
774
775         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
776         if ((ret == 0) && (sig == EEPROM_INDICATOR))
777                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
778         else
779                 ret = -EINVAL;
780
781         return ret;
782 }
783
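/* Write @length bytes to the EEPROM: issue a single write/erase enable
 * (EWEN) command, then load E2P_DATA and issue a WRITE command for each
 * byte, waiting for completion in between. LED pins are handled as in the
 * raw read path.
 */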
784 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
785                                     u32 length, u8 *data)
786 {
787         u32 val;
788         u32 saved;
789         int i, ret;
790         int retval;
791
792         /* Depending on the chip, some EEPROM pins are muxed with the LED
793          * function; disable and later restore it to access the EEPROM.
794          */
795         ret = lan78xx_read_reg(dev, HW_CFG, &val);
796         saved = val;
797         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
798                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
799                 ret = lan78xx_write_reg(dev, HW_CFG, val);
800         }
801
802         retval = lan78xx_eeprom_confirm_not_busy(dev);
803         if (retval)
804                 goto exit;
805
806         /* Issue write/erase enable command */
807         val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
808         ret = lan78xx_write_reg(dev, E2P_CMD, val);
809         if (unlikely(ret < 0)) {
810                 retval = -EIO;
811                 goto exit;
812         }
813
814         retval = lan78xx_wait_eeprom(dev);
815         if (retval < 0)
816                 goto exit;
817
818         for (i = 0; i < length; i++) {
819                 /* Fill data register */
820                 val = data[i];
821                 ret = lan78xx_write_reg(dev, E2P_DATA, val);
822                 if (ret < 0) {
823                         retval = -EIO;
824                         goto exit;
825                 }
826
827                 /* Send "write" command */
828                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
829                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
830                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
831                 if (ret < 0) {
832                         retval = -EIO;
833                         goto exit;
834                 }
835
836                 retval = lan78xx_wait_eeprom(dev);
837                 if (retval < 0)
838                         goto exit;
839
840                 offset++;
841         }
842
843         retval = 0;
844 exit:
845         if (dev->chipid == ID_REV_CHIP_ID_7800_)
846                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
847
848         return retval;
849 }
850
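/* Read @length bytes from the OTP array: power the OTP block up if needed,
 * then for each byte program the address registers, issue a READ command
 * and poll OTP_STATUS before collecting the result from OTP_RD_DATA.
 */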
851 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
852                                 u32 length, u8 *data)
853 {
854         int i;
855         u32 buf;
856         unsigned long timeout;
857
858         lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
859
860         if (buf & OTP_PWR_DN_PWRDN_N_) {
861                 /* clear it and wait for it to be cleared */
862                 lan78xx_write_reg(dev, OTP_PWR_DN, 0);
863
864                 timeout = jiffies + HZ;
865                 do {
866                         usleep_range(1, 10);
867                         lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
868                         if (time_after(jiffies, timeout)) {
869                                 netdev_warn(dev->net,
870                                             "timeout on OTP_PWR_DN");
871                                 return -EIO;
872                         }
873                 } while (buf & OTP_PWR_DN_PWRDN_N_);
874         }
875
876         for (i = 0; i < length; i++) {
877                 lan78xx_write_reg(dev, OTP_ADDR1,
878                                   ((offset + i) >> 8) & OTP_ADDR1_15_11);
879                 lan78xx_write_reg(dev, OTP_ADDR2,
880                                   ((offset + i) & OTP_ADDR2_10_3));
881
882                 lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
883                 lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
884
885                 timeout = jiffies + HZ;
886                 do {
887                         udelay(1);
888                         lan78xx_read_reg(dev, OTP_STATUS, &buf);
889                         if (time_after(jiffies, timeout)) {
890                                 netdev_warn(dev->net,
891                                             "timeout on OTP_STATUS");
892                                 return -EIO;
893                         }
894                 } while (buf & OTP_STATUS_BUSY_);
895
896                 lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
897
898                 data[i] = (u8)(buf & 0xFF);
899         }
900
901         return 0;
902 }
903
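/* Program @length bytes into the OTP array in BYTE mode: power the block up
 * if needed, then write each byte through the address/data registers and a
 * program-verify command, polling OTP_STATUS for completion.
 */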
904 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
905                                  u32 length, u8 *data)
906 {
907         int i;
908         u32 buf;
909         unsigned long timeout;
910
911         lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
912
913         if (buf & OTP_PWR_DN_PWRDN_N_) {
914                 /* clear it and wait for it to be cleared */
915                 lan78xx_write_reg(dev, OTP_PWR_DN, 0);
916
917                 timeout = jiffies + HZ;
918                 do {
919                         udelay(1);
920                         lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
921                         if (time_after(jiffies, timeout)) {
922                                 netdev_warn(dev->net,
923                                             "timeout on OTP_PWR_DN completion");
924                                 return -EIO;
925                         }
926                 } while (buf & OTP_PWR_DN_PWRDN_N_);
927         }
928
929         /* set to BYTE program mode */
930         lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
931
932         for (i = 0; i < length; i++) {
933                 lan78xx_write_reg(dev, OTP_ADDR1,
934                                   ((offset + i) >> 8) & OTP_ADDR1_15_11);
935                 lan78xx_write_reg(dev, OTP_ADDR2,
936                                   ((offset + i) & OTP_ADDR2_10_3));
937                 lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
938                 lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
939                 lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
940
941                 timeout = jiffies + HZ;
942                 do {
943                         udelay(1);
944                         lan78xx_read_reg(dev, OTP_STATUS, &buf);
945                         if (time_after(jiffies, timeout)) {
946                                 netdev_warn(dev->net,
947                                             "Timeout on OTP_STATUS completion");
948                                 return -EIO;
949                         }
950                 } while (buf & OTP_STATUS_BUSY_);
951         }
952
953         return 0;
954 }
955
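/* Read from the OTP image, honouring the indicator byte at offset 0:
 * OTP_INDICATOR_2 means the valid image lives in the second 256-byte bank,
 * so the offset is shifted by 0x100; any value other than OTP_INDICATOR_1
 * is treated as an invalid image.
 */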
956 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
957                             u32 length, u8 *data)
958 {
959         u8 sig;
960         int ret;
961
962         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
963
964         if (ret == 0) {
965                 if (sig == OTP_INDICATOR_2)
966                         offset += 0x100;
967                 else if (sig != OTP_INDICATOR_1)
968                         ret = -EINVAL;
969                 if (!ret)
970                         ret = lan78xx_read_raw_otp(dev, offset, length, data);
971         }
972
973         return ret;
974 }
975
976 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
977 {
978         int i, ret;
979
980         for (i = 0; i < 100; i++) {
981                 u32 dp_sel;
982
983                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
984                 if (unlikely(ret < 0))
985                         return -EIO;
986
987                 if (dp_sel & DP_SEL_DPRDY_)
988                         return 0;
989
990                 usleep_range(40, 100);
991         }
992
993         netdev_warn(dev->net, "%s timed out", __func__);
994
995         return -EIO;
996 }
997
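/* Write @length words into the selected internal RAM (e.g. the VLAN/
 * multicast hash table) through the DP_SEL/DP_ADDR/DP_DATA/DP_CMD window,
 * waiting for the dataport to become ready between words. Serialized by
 * dataport_mutex and wrapped in a runtime-PM reference.
 */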
998 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
999                                   u32 addr, u32 length, u32 *buf)
1000 {
1001         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1002         u32 dp_sel;
1003         int i, ret;
1004
1005         if (usb_autopm_get_interface(dev->intf) < 0)
1006                 return 0;
1007
1008         mutex_lock(&pdata->dataport_mutex);
1009
1010         ret = lan78xx_dataport_wait_not_busy(dev);
1011         if (ret < 0)
1012                 goto done;
1013
1014         ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1015
1016         dp_sel &= ~DP_SEL_RSEL_MASK_;
1017         dp_sel |= ram_select;
1018         ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
1019
1020         for (i = 0; i < length; i++) {
1021                 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
1022
1023                 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
1024
1025                 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1026
1027                 ret = lan78xx_dataport_wait_not_busy(dev);
1028                 if (ret < 0)
1029                         goto done;
1030         }
1031
1032 done:
1033         mutex_unlock(&pdata->dataport_mutex);
1034         usb_autopm_put_interface(dev->intf);
1035
1036         return ret;
1037 }
1038
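/* Pack a MAC address into a perfect-filter (MAF) table entry: bytes 0-3 go
 * into the low word, bytes 4-5 plus the VALID and TYPE_DST flags into the
 * high word. Entry 0 is reserved for the device's own address.
 */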
1039 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1040                                     int index, u8 addr[ETH_ALEN])
1041 {
1042         u32 temp;
1043
1044         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1045                 temp = addr[3];
1046                 temp = addr[2] | (temp << 8);
1047                 temp = addr[1] | (temp << 8);
1048                 temp = addr[0] | (temp << 8);
1049                 pdata->pfilter_table[index][1] = temp;
1050                 temp = addr[5];
1051                 temp = addr[4] | (temp << 8);
1052                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1053                 pdata->pfilter_table[index][0] = temp;
1054         }
1055 }
1056
1057 /* returns hash bit number (top 9 bits of the CRC-32) for given MAC address */
1058 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1059 {
1060         return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1061 }
1062
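/* Work handler that pushes the multicast/VLAN hash table, the perfect-filter
 * entries and RFE_CTL to the hardware; register access over USB can sleep,
 * so it cannot be done from the ndo_set_rx_mode path directly.
 */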
1063 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1064 {
1065         struct lan78xx_priv *pdata =
1066                         container_of(param, struct lan78xx_priv, set_multicast);
1067         struct lan78xx_net *dev = pdata->dev;
1068         int i;
1069
1070         netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1071                   pdata->rfe_ctl);
1072
1073         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1074                                DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1075
1076         for (i = 1; i < NUM_OF_MAF; i++) {
1077                 lan78xx_write_reg(dev, MAF_HI(i), 0);
1078                 lan78xx_write_reg(dev, MAF_LO(i),
1079                                   pdata->pfilter_table[i][1]);
1080                 lan78xx_write_reg(dev, MAF_HI(i),
1081                                   pdata->pfilter_table[i][0]);
1082         }
1083
1084         lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1085 }
1086
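/* ndo_set_rx_mode handler: rebuild the receive filter state under
 * rfe_ctl_lock (promiscuous/all-multi flags, the first 32 multicast
 * addresses in the perfect filter, the rest in the hash table) and then
 * schedule the deferred write above to apply it.
 */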
1087 static void lan78xx_set_multicast(struct net_device *netdev)
1088 {
1089         struct lan78xx_net *dev = netdev_priv(netdev);
1090         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1091         unsigned long flags;
1092         int i;
1093
1094         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1095
1096         pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1097                             RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1098
1099         for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1100                 pdata->mchash_table[i] = 0;
1101
1102         /* pfilter_table[0] holds the device's own HW address */
1103         for (i = 1; i < NUM_OF_MAF; i++) {
1104                 pdata->pfilter_table[i][0] = 0;
1105                 pdata->pfilter_table[i][1] = 0;
1106         }
1107
1108         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1109
1110         if (dev->net->flags & IFF_PROMISC) {
1111                 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1112                 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1113         } else {
1114                 if (dev->net->flags & IFF_ALLMULTI) {
1115                         netif_dbg(dev, drv, dev->net,
1116                                   "receive all multicast enabled");
1117                         pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1118                 }
1119         }
1120
1121         if (netdev_mc_count(dev->net)) {
1122                 struct netdev_hw_addr *ha;
1123                 int i;
1124
1125                 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1126
1127                 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1128
1129                 i = 1;
1130                 netdev_for_each_mc_addr(ha, netdev) {
1131                         /* set first 32 into Perfect Filter */
1132                         if (i < 33) {
1133                                 lan78xx_set_addr_filter(pdata, i, ha->addr);
1134                         } else {
1135                                 u32 bitnum = lan78xx_hash(ha->addr);
1136
1137                                 pdata->mchash_table[bitnum / 32] |=
1138                                                         (1 << (bitnum % 32));
1139                                 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1140                         }
1141                         i++;
1142                 }
1143         }
1144
1145         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1146
1147         /* defer register writes to a sleepable context */
1148         schedule_work(&pdata->set_multicast);
1149 }
1150
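/* Program pause/flow control: resolve the capabilities from autonegotiation
 * or from the manually requested settings, set the FCT_FLOW thresholds
 * according to the USB speed, and finally enable TX/RX pause in FLOW.
 */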
1151 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1152                                       u16 lcladv, u16 rmtadv)
1153 {
1154         u32 flow = 0, fct_flow = 0;
1155         u8 cap;
1156
1157         if (dev->fc_autoneg)
1158                 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1159         else
1160                 cap = dev->fc_request_control;
1161
1162         if (cap & FLOW_CTRL_TX)
1163                 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1164
1165         if (cap & FLOW_CTRL_RX)
1166                 flow |= FLOW_CR_RX_FCEN_;
1167
1168         if (dev->udev->speed == USB_SPEED_SUPER)
1169                 fct_flow = 0x817;
1170         else if (dev->udev->speed == USB_SPEED_HIGH)
1171                 fct_flow = 0x211;
1172
1173         netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1174                   (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1175                   (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1176
1177         lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1178
1179         /* threshold value should be set before enabling flow */
1180         lan78xx_write_reg(dev, FLOW, flow);
1181
1182         return 0;
1183 }
1184
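/* Handle a PHY link change: on link down, reset the MAC and stop the
 * statistics timer; on link up, tune U1/U2 LPM for SuperSpeed operation,
 * update flow control from the advertisement registers, restart the
 * statistics timer and kick the bottom half.
 */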
1185 static int lan78xx_link_reset(struct lan78xx_net *dev)
1186 {
1187         struct phy_device *phydev = dev->net->phydev;
1188         struct ethtool_link_ksettings ecmd;
1189         int ladv, radv, ret, link;
1190         u32 buf;
1191
1192         /* clear LAN78xx interrupt status */
1193         ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1194         if (unlikely(ret < 0))
1195                 return ret;
1196
1197         mutex_lock(&phydev->lock);
1198         phy_read_status(phydev);
1199         link = phydev->link;
1200         mutex_unlock(&phydev->lock);
1201
1202         if (!link && dev->link_on) {
1203                 dev->link_on = false;
1204
1205                 /* reset MAC */
1206                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1207                 if (unlikely(ret < 0))
1208                         return ret;
1209                 buf |= MAC_CR_RST_;
1210                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1211                 if (unlikely(ret < 0))
1212                         return ret;
1213
1214                 del_timer(&dev->stat_monitor);
1215         } else if (link && !dev->link_on) {
1216                 dev->link_on = true;
1217
1218                 phy_ethtool_ksettings_get(phydev, &ecmd);
1219
1220                 if (dev->udev->speed == USB_SPEED_SUPER) {
1221                         if (ecmd.base.speed == 1000) {
1222                                 /* disable U2 */
1223                                 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1224                                 if (ret < 0)
1225                                         return ret;
1226                                 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1227                                 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1228                                 if (ret < 0)
1229                                         return ret;
1230                                 /* enable U1 */
1231                                 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1232                                 if (ret < 0)
1233                                         return ret;
1234                                 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1235                                 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1236                                 if (ret < 0)
1237                                         return ret;
1238                         } else {
1239                                 /* enable U1 & U2 */
1240                                 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1241                                 if (ret < 0)
1242                                         return ret;
1243                                 buf |= USB_CFG1_DEV_U2_INIT_EN_;
1244                                 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1245                                 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1246                                 if (ret < 0)
1247                                         return ret;
1248                         }
1249                 }
1250
1251                 ladv = phy_read(phydev, MII_ADVERTISE);
1252                 if (ladv < 0)
1253                         return ladv;
1254
1255                 radv = phy_read(phydev, MII_LPA);
1256                 if (radv < 0)
1257                         return radv;
1258
1259                 netif_dbg(dev, link, dev->net,
1260                           "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1261                           ecmd.base.speed, ecmd.base.duplex, ladv, radv);
1262
1263                 ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1264                                                  radv);
1265                 if (ret < 0)
1266                         return ret;
1267
1268                 if (!timer_pending(&dev->stat_monitor)) {
1269                         dev->delta = 1;
1270                         mod_timer(&dev->stat_monitor,
1271                                   jiffies + STAT_UPDATE_TIMER);
1272                 }
1273
1274                 tasklet_schedule(&dev->bh);
1275         }
1276
1277         return 0;
1278 }
1279
1280 /* Some work can't be done in tasklets, so we use keventd.
1281  *
1282  * NOTE: annoying asymmetry: if the work is already active, schedule_work()
1283  * fails, but tasklet_schedule() doesn't. Hope the failure is rare.
1284  */
1285 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1286 {
1287         set_bit(work, &dev->flags);
1288         if (!schedule_delayed_work(&dev->wq, 0))
1289                 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1290 }
1291
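/* Completion handler for the interrupt endpoint: expects a 4-byte status
 * word; a PHY interrupt schedules an EVENT_LINK_RESET kevent and is also
 * forwarded to the PHY through the driver's interrupt domain.
 */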
1292 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1293 {
1294         u32 intdata;
1295
1296         if (urb->actual_length != 4) {
1297                 netdev_warn(dev->net,
1298                             "unexpected urb length %d", urb->actual_length);
1299                 return;
1300         }
1301
1302         intdata = get_unaligned_le32(urb->transfer_buffer);
1303
1304         if (intdata & INT_ENP_PHY_INT) {
1305                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1306                 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1307
1308                 if (dev->domain_data.phyirq > 0) {
1309                         local_irq_disable();
1310                         generic_handle_irq(dev->domain_data.phyirq);
1311                         local_irq_enable();
1312                 }
1313         } else {
1314                 netdev_warn(dev->net,
1315                             "unexpected interrupt: 0x%08x\n", intdata);
1316         }
1317 }
1318
1319 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1320 {
1321         return MAX_EEPROM_SIZE;
1322 }
1323
1324 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1325                                       struct ethtool_eeprom *ee, u8 *data)
1326 {
1327         struct lan78xx_net *dev = netdev_priv(netdev);
1328         int ret;
1329
1330         ret = usb_autopm_get_interface(dev->intf);
1331         if (ret)
1332                 return ret;
1333
1334         ee->magic = LAN78XX_EEPROM_MAGIC;
1335
1336         ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1337
1338         usb_autopm_put_interface(dev->intf);
1339
1340         return ret;
1341 }
1342
1343 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1344                                       struct ethtool_eeprom *ee, u8 *data)
1345 {
1346         struct lan78xx_net *dev = netdev_priv(netdev);
1347         int ret;
1348
1349         ret = usb_autopm_get_interface(dev->intf);
1350         if (ret)
1351                 return ret;
1352
1353         /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1354          * to load data from EEPROM
1355          */
1356         if (ee->magic == LAN78XX_EEPROM_MAGIC)
1357                 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1358         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1359                  (ee->offset == 0) &&
1360                  (ee->len == 512) &&
1361                  (data[0] == OTP_INDICATOR_1))
1362                 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1363
1364         usb_autopm_put_interface(dev->intf);
1365
1366         return ret;
1367 }
1368
1369 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1370                                 u8 *data)
1371 {
1372         if (stringset == ETH_SS_STATS)
1373                 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1374 }
1375
1376 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1377 {
1378         if (sset == ETH_SS_STATS)
1379                 return ARRAY_SIZE(lan78xx_gstrings);
1380         else
1381                 return -EOPNOTSUPP;
1382 }
1383
1384 static void lan78xx_get_stats(struct net_device *netdev,
1385                               struct ethtool_stats *stats, u64 *data)
1386 {
1387         struct lan78xx_net *dev = netdev_priv(netdev);
1388
1389         lan78xx_update_stats(dev);
1390
1391         mutex_lock(&dev->stats.access_lock);
1392         memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1393         mutex_unlock(&dev->stats.access_lock);
1394 }
1395
1396 static void lan78xx_get_wol(struct net_device *netdev,
1397                             struct ethtool_wolinfo *wol)
1398 {
1399         struct lan78xx_net *dev = netdev_priv(netdev);
1400         int ret;
1401         u32 buf;
1402         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1403
1404         if (usb_autopm_get_interface(dev->intf) < 0)
1405                 return;
1406
1407         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1408         if (unlikely(ret < 0)) {
1409                 wol->supported = 0;
1410                 wol->wolopts = 0;
1411         } else {
1412                 if (buf & USB_CFG_RMT_WKP_) {
1413                         wol->supported = WAKE_ALL;
1414                         wol->wolopts = pdata->wol;
1415                 } else {
1416                         wol->supported = 0;
1417                         wol->wolopts = 0;
1418                 }
1419         }
1420
1421         usb_autopm_put_interface(dev->intf);
1422 }
1423
1424 static int lan78xx_set_wol(struct net_device *netdev,
1425                            struct ethtool_wolinfo *wol)
1426 {
1427         struct lan78xx_net *dev = netdev_priv(netdev);
1428         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1429         int ret;
1430
1431         if (wol->wolopts & ~WAKE_ALL)
1432                 return -EINVAL;
1433
1434         ret = usb_autopm_get_interface(dev->intf);
1435         if (ret < 0)
1436                 return ret;
1437
1438         pdata->wol = wol->wolopts;
1439
1440         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1441
1442         phy_ethtool_set_wol(netdev->phydev, wol);
1443
1444         usb_autopm_put_interface(dev->intf);
1445
1446         return ret;
1447 }
1448
1449 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1450 {
1451         struct lan78xx_net *dev = netdev_priv(net);
1452         struct phy_device *phydev = net->phydev;
1453         int ret;
1454         u32 buf;
1455
1456         ret = usb_autopm_get_interface(dev->intf);
1457         if (ret < 0)
1458                 return ret;
1459
1460         ret = phy_ethtool_get_eee(phydev, edata);
1461         if (ret < 0)
1462                 goto exit;
1463
1464         ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1465         if (buf & MAC_CR_EEE_EN_) {
1466                 edata->eee_enabled = true;
1467                 edata->eee_active = !!(edata->advertised &
1468                                        edata->lp_advertised);
1469                 edata->tx_lpi_enabled = true;
1470                 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer use the same uSec unit */
1471                 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1472                 edata->tx_lpi_timer = buf;
1473         } else {
1474                 edata->eee_enabled = false;
1475                 edata->eee_active = false;
1476                 edata->tx_lpi_enabled = false;
1477                 edata->tx_lpi_timer = 0;
1478         }
1479
1480         ret = 0;
1481 exit:
1482         usb_autopm_put_interface(dev->intf);
1483
1484         return ret;
1485 }
1486
1487 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1488 {
1489         struct lan78xx_net *dev = netdev_priv(net);
1490         int ret;
1491         u32 buf;
1492
1493         ret = usb_autopm_get_interface(dev->intf);
1494         if (ret < 0)
1495                 return ret;
1496
1497         if (edata->eee_enabled) {
1498                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1499                 buf |= MAC_CR_EEE_EN_;
1500                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1501
1502                 phy_ethtool_set_eee(net->phydev, edata);
1503
1504                 buf = (u32)edata->tx_lpi_timer;
1505                 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1506         } else {
1507                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1508                 buf &= ~MAC_CR_EEE_EN_;
1509                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1510         }
1511
1512         usb_autopm_put_interface(dev->intf);
1513
1514         return 0;
1515 }
1516
1517 static u32 lan78xx_get_link(struct net_device *net)
1518 {
1519         u32 link;
1520
1521         mutex_lock(&net->phydev->lock);
1522         phy_read_status(net->phydev);
1523         link = net->phydev->link;
1524         mutex_unlock(&net->phydev->lock);
1525
1526         return link;
1527 }
1528
1529 static void lan78xx_get_drvinfo(struct net_device *net,
1530                                 struct ethtool_drvinfo *info)
1531 {
1532         struct lan78xx_net *dev = netdev_priv(net);
1533
1534         strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1535         usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1536 }
1537
1538 static u32 lan78xx_get_msglevel(struct net_device *net)
1539 {
1540         struct lan78xx_net *dev = netdev_priv(net);
1541
1542         return dev->msg_enable;
1543 }
1544
1545 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1546 {
1547         struct lan78xx_net *dev = netdev_priv(net);
1548
1549         dev->msg_enable = level;
1550 }
1551
1552 static int lan78xx_get_link_ksettings(struct net_device *net,
1553                                       struct ethtool_link_ksettings *cmd)
1554 {
1555         struct lan78xx_net *dev = netdev_priv(net);
1556         struct phy_device *phydev = net->phydev;
1557         int ret;
1558
1559         ret = usb_autopm_get_interface(dev->intf);
1560         if (ret < 0)
1561                 return ret;
1562
1563         phy_ethtool_ksettings_get(phydev, cmd);
1564
1565         usb_autopm_put_interface(dev->intf);
1566
1567         return ret;
1568 }
1569
1570 static int lan78xx_set_link_ksettings(struct net_device *net,
1571                                       const struct ethtool_link_ksettings *cmd)
1572 {
1573         struct lan78xx_net *dev = netdev_priv(net);
1574         struct phy_device *phydev = net->phydev;
1575         int ret = 0;
1576         int temp;
1577
1578         ret = usb_autopm_get_interface(dev->intf);
1579         if (ret < 0)
1580                 return ret;
1581
1582         /* change speed & duplex */
1583         ret = phy_ethtool_ksettings_set(phydev, cmd);
1584
1585         if (!cmd->base.autoneg) {
1586                 /* force link down */
1587                 temp = phy_read(phydev, MII_BMCR);
1588                 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1589                 mdelay(1);
1590                 phy_write(phydev, MII_BMCR, temp);
1591         }
1592
1593         usb_autopm_put_interface(dev->intf);
1594
1595         return ret;
1596 }
1597
1598 static void lan78xx_get_pause(struct net_device *net,
1599                               struct ethtool_pauseparam *pause)
1600 {
1601         struct lan78xx_net *dev = netdev_priv(net);
1602         struct phy_device *phydev = net->phydev;
1603         struct ethtool_link_ksettings ecmd;
1604
1605         phy_ethtool_ksettings_get(phydev, &ecmd);
1606
1607         pause->autoneg = dev->fc_autoneg;
1608
1609         if (dev->fc_request_control & FLOW_CTRL_TX)
1610                 pause->tx_pause = 1;
1611
1612         if (dev->fc_request_control & FLOW_CTRL_RX)
1613                 pause->rx_pause = 1;
1614 }
1615
1616 static int lan78xx_set_pause(struct net_device *net,
1617                              struct ethtool_pauseparam *pause)
1618 {
1619         struct lan78xx_net *dev = netdev_priv(net);
1620         struct phy_device *phydev = net->phydev;
1621         struct ethtool_link_ksettings ecmd;
1622         int ret;
1623
1624         phy_ethtool_ksettings_get(phydev, &ecmd);
1625
1626         if (pause->autoneg && !ecmd.base.autoneg) {
1627                 ret = -EINVAL;
1628                 goto exit;
1629         }
1630
1631         dev->fc_request_control = 0;
1632         if (pause->rx_pause)
1633                 dev->fc_request_control |= FLOW_CTRL_RX;
1634
1635         if (pause->tx_pause)
1636                 dev->fc_request_control |= FLOW_CTRL_TX;
1637
1638         if (ecmd.base.autoneg) {
1639                 __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
1640                 u32 mii_adv;
1641
1642                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
1643                                    ecmd.link_modes.advertising);
1644                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
1645                                    ecmd.link_modes.advertising);
1646                 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1647                 mii_adv_to_linkmode_adv_t(fc, mii_adv);
1648                 linkmode_or(ecmd.link_modes.advertising, fc,
1649                             ecmd.link_modes.advertising);
1650
1651                 phy_ethtool_ksettings_set(phydev, &ecmd);
1652         }
1653
1654         dev->fc_autoneg = pause->autoneg;
1655
1656         ret = 0;
1657 exit:
1658         return ret;
1659 }
1660
1661 static int lan78xx_get_regs_len(struct net_device *netdev)
1662 {
1663         if (!netdev->phydev)
1664                 return (sizeof(lan78xx_regs));
1665         else
1666                 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1667 }
1668
1669 static void
1670 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1671                  void *buf)
1672 {
1673         u32 *data = buf;
1674         int i, j;
1675         struct lan78xx_net *dev = netdev_priv(netdev);
1676
1677         /* Read Device/MAC registers */
1678         for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
1679                 lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1680
1681         if (!netdev->phydev)
1682                 return;
1683
1684         /* Read PHY registers */
1685         for (j = 0; j < 32; i++, j++)
1686                 data[i] = phy_read(netdev->phydev, j);
1687 }
1688
1689 static const struct ethtool_ops lan78xx_ethtool_ops = {
1690         .get_link       = lan78xx_get_link,
1691         .nway_reset     = phy_ethtool_nway_reset,
1692         .get_drvinfo    = lan78xx_get_drvinfo,
1693         .get_msglevel   = lan78xx_get_msglevel,
1694         .set_msglevel   = lan78xx_set_msglevel,
1695         .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1696         .get_eeprom     = lan78xx_ethtool_get_eeprom,
1697         .set_eeprom     = lan78xx_ethtool_set_eeprom,
1698         .get_ethtool_stats = lan78xx_get_stats,
1699         .get_sset_count = lan78xx_get_sset_count,
1700         .get_strings    = lan78xx_get_strings,
1701         .get_wol        = lan78xx_get_wol,
1702         .set_wol        = lan78xx_set_wol,
1703         .get_eee        = lan78xx_get_eee,
1704         .set_eee        = lan78xx_set_eee,
1705         .get_pauseparam = lan78xx_get_pause,
1706         .set_pauseparam = lan78xx_set_pause,
1707         .get_link_ksettings = lan78xx_get_link_ksettings,
1708         .set_link_ksettings = lan78xx_set_link_ksettings,
1709         .get_regs_len   = lan78xx_get_regs_len,
1710         .get_regs       = lan78xx_get_regs,
1711 };
1712
1713 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1714 {
1715         u32 addr_lo, addr_hi;
1716         u8 addr[6];
1717
1718         lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1719         lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1720
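        /* RX_ADDRL carries MAC address bytes 0-3 and RX_ADDRH bytes 4-5,
         * least significant byte first, hence the byte-by-byte unpacking
         * below.
         */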
1721         addr[0] = addr_lo & 0xFF;
1722         addr[1] = (addr_lo >> 8) & 0xFF;
1723         addr[2] = (addr_lo >> 16) & 0xFF;
1724         addr[3] = (addr_lo >> 24) & 0xFF;
1725         addr[4] = addr_hi & 0xFF;
1726         addr[5] = (addr_hi >> 8) & 0xFF;
1727
1728         if (!is_valid_ether_addr(addr)) {
1729                 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1730                         /* valid address present in Device Tree */
1731                         netif_dbg(dev, ifup, dev->net,
1732                                   "MAC address read from Device Tree");
1733                 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1734                                                  ETH_ALEN, addr) == 0) ||
1735                             (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1736                                               ETH_ALEN, addr) == 0)) &&
1737                            is_valid_ether_addr(addr)) {
1738                         /* eeprom values are valid so use them */
1739                         netif_dbg(dev, ifup, dev->net,
1740                                   "MAC address read from EEPROM");
1741                 } else {
1742                         /* generate random MAC */
1743                         eth_random_addr(addr);
1744                         netif_dbg(dev, ifup, dev->net,
1745                                   "MAC address set to random addr");
1746                 }
1747
1748                 addr_lo = addr[0] | (addr[1] << 8) |
1749                           (addr[2] << 16) | (addr[3] << 24);
1750                 addr_hi = addr[4] | (addr[5] << 8);
1751
1752                 lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1753                 lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1754         }
1755
1756         lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1757         lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1758
1759         ether_addr_copy(dev->net->dev_addr, addr);
1760 }
1761
1762 /* MDIO read and write wrappers for phylib */
1763 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1764 {
1765         struct lan78xx_net *dev = bus->priv;
1766         u32 val, addr;
1767         int ret;
1768
1769         ret = usb_autopm_get_interface(dev->intf);
1770         if (ret < 0)
1771                 return ret;
1772
1773         mutex_lock(&dev->phy_mutex);
1774
1775         /* confirm MII not busy */
1776         ret = lan78xx_phy_wait_not_busy(dev);
1777         if (ret < 0)
1778                 goto done;
1779
1780         /* set the address, index & direction (read from PHY) */
1781         addr = mii_access(phy_id, idx, MII_READ);
1782         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1783
1784         ret = lan78xx_phy_wait_not_busy(dev);
1785         if (ret < 0)
1786                 goto done;
1787
1788         ret = lan78xx_read_reg(dev, MII_DATA, &val);
1789
1790         ret = (int)(val & 0xFFFF);
1791
1792 done:
1793         mutex_unlock(&dev->phy_mutex);
1794         usb_autopm_put_interface(dev->intf);
1795
1796         return ret;
1797 }
1798
1799 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1800                                  u16 regval)
1801 {
1802         struct lan78xx_net *dev = bus->priv;
1803         u32 val, addr;
1804         int ret;
1805
1806         ret = usb_autopm_get_interface(dev->intf);
1807         if (ret < 0)
1808                 return ret;
1809
1810         mutex_lock(&dev->phy_mutex);
1811
1812         /* confirm MII not busy */
1813         ret = lan78xx_phy_wait_not_busy(dev);
1814         if (ret < 0)
1815                 goto done;
1816
1817         val = (u32)regval;
1818         ret = lan78xx_write_reg(dev, MII_DATA, val);
1819
1820         /* set the address, index & direction (write to PHY) */
1821         addr = mii_access(phy_id, idx, MII_WRITE);
1822         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1823
1824         ret = lan78xx_phy_wait_not_busy(dev);
1825         if (ret < 0)
1826                 goto done;
1827
1828 done:
1829         mutex_unlock(&dev->phy_mutex);
1830         usb_autopm_put_interface(dev->intf);
1831         return 0;
1832 }
1833
1834 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1835 {
1836         struct device_node *node;
1837         int ret;
1838
1839         dev->mdiobus = mdiobus_alloc();
1840         if (!dev->mdiobus) {
1841                 netdev_err(dev->net, "can't allocate MDIO bus\n");
1842                 return -ENOMEM;
1843         }
1844
1845         dev->mdiobus->priv = (void *)dev;
1846         dev->mdiobus->read = lan78xx_mdiobus_read;
1847         dev->mdiobus->write = lan78xx_mdiobus_write;
1848         dev->mdiobus->name = "lan78xx-mdiobus";
1849         dev->mdiobus->parent = &dev->udev->dev;
1850
1851         snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1852                  dev->udev->bus->busnum, dev->udev->devnum);
1853
1854         switch (dev->chipid) {
1855         case ID_REV_CHIP_ID_7800_:
1856         case ID_REV_CHIP_ID_7850_:
1857                 /* set to internal PHY id */
1858                 dev->mdiobus->phy_mask = ~(1 << 1);
1859                 break;
1860         case ID_REV_CHIP_ID_7801_:
1861                 /* scan thru PHYAD[2..0] */
1862                 dev->mdiobus->phy_mask = ~(0xFF);
1863                 break;
1864         }
1865
1866         node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
1867         ret = of_mdiobus_register(dev->mdiobus, node);
1868         of_node_put(node);
1869         if (ret) {
1870                 netdev_err(dev->net, "can't register MDIO bus\n");
1871                 goto exit1;
1872         }
1873
1874         netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1875         return 0;
1876 exit1:
1877         mdiobus_free(dev->mdiobus);
1878         return ret;
1879 }
1880
1881 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1882 {
1883         mdiobus_unregister(dev->mdiobus);
1884         mdiobus_free(dev->mdiobus);
1885 }
1886
1887 static void lan78xx_link_status_change(struct net_device *net)
1888 {
1889         struct phy_device *phydev = net->phydev;
1890
1891         phy_print_status(phydev);
1892 }
1893
1894 static int irq_map(struct irq_domain *d, unsigned int irq,
1895                    irq_hw_number_t hwirq)
1896 {
1897         struct irq_domain_data *data = d->host_data;
1898
1899         irq_set_chip_data(irq, data);
1900         irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1901         irq_set_noprobe(irq);
1902
1903         return 0;
1904 }
1905
1906 static void irq_unmap(struct irq_domain *d, unsigned int irq)
1907 {
1908         irq_set_chip_and_handler(irq, NULL, NULL);
1909         irq_set_chip_data(irq, NULL);
1910 }
1911
1912 static const struct irq_domain_ops chip_domain_ops = {
1913         .map    = irq_map,
1914         .unmap  = irq_unmap,
1915 };
1916
1917 static void lan78xx_irq_mask(struct irq_data *irqd)
1918 {
1919         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1920
1921         data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1922 }
1923
1924 static void lan78xx_irq_unmask(struct irq_data *irqd)
1925 {
1926         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1927
1928         data->irqenable |= BIT(irqd_to_hwirq(irqd));
1929 }
1930
1931 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1932 {
1933         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1934
1935         mutex_lock(&data->irq_lock);
1936 }
1937
1938 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1939 {
1940         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1941         struct lan78xx_net *dev =
1942                         container_of(data, struct lan78xx_net, domain_data);
1943         u32 buf;
1944
1945         /* Do the register access here because irq_bus_lock & irq_bus_sync_unlock
1946          * are the only two callbacks executed in a non-atomic context.
1947          */
1948         lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1949         if (buf != data->irqenable)
1950                 lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1951
1952         mutex_unlock(&data->irq_lock);
1953 }
1954
1955 static struct irq_chip lan78xx_irqchip = {
1956         .name                   = "lan78xx-irqs",
1957         .irq_mask               = lan78xx_irq_mask,
1958         .irq_unmask             = lan78xx_irq_unmask,
1959         .irq_bus_lock           = lan78xx_irq_bus_lock,
1960         .irq_bus_sync_unlock    = lan78xx_irq_bus_sync_unlock,
1961 };
1962
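/* The device reports PHY and other events through its interrupt endpoint.
 * The irq_domain set up below exposes those event bits as Linux IRQs so
 * that, for example, the PHY interrupt (INT_EP_PHY) can be handed to
 * phylib instead of falling back to polling.
 */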
1963 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1964 {
1965         struct device_node *of_node;
1966         struct irq_domain *irqdomain;
1967         unsigned int irqmap = 0;
1968         u32 buf;
1969         int ret = 0;
1970
1971         of_node = dev->udev->dev.parent->of_node;
1972
1973         mutex_init(&dev->domain_data.irq_lock);
1974
1975         lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1976         dev->domain_data.irqenable = buf;
1977
1978         dev->domain_data.irqchip = &lan78xx_irqchip;
1979         dev->domain_data.irq_handler = handle_simple_irq;
1980
1981         irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1982                                           &chip_domain_ops, &dev->domain_data);
1983         if (irqdomain) {
1984                 /* create mapping for PHY interrupt */
1985                 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1986                 if (!irqmap) {
1987                         irq_domain_remove(irqdomain);
1988
1989                         irqdomain = NULL;
1990                         ret = -EINVAL;
1991                 }
1992         } else {
1993                 ret = -EINVAL;
1994         }
1995
1996         dev->domain_data.irqdomain = irqdomain;
1997         dev->domain_data.phyirq = irqmap;
1998
1999         return ret;
2000 }
2001
2002 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2003 {
2004         if (dev->domain_data.phyirq > 0) {
2005                 irq_dispose_mapping(dev->domain_data.phyirq);
2006
2007                 if (dev->domain_data.irqdomain)
2008                         irq_domain_remove(dev->domain_data.irqdomain);
2009         }
2010         dev->domain_data.phyirq = 0;
2011         dev->domain_data.irqdomain = NULL;
2012 }
2013
2014 static int lan8835_fixup(struct phy_device *phydev)
2015 {
2016         int buf;
2017         struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2018
2019         /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2020         buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2021         buf &= ~0x1800;
2022         buf |= 0x0800;
2023         phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2024
2025         /* RGMII MAC TXC Delay Enable */
2026         lan78xx_write_reg(dev, MAC_RGMII_ID,
2027                           MAC_RGMII_ID_TXC_DELAY_EN_);
2028
2029         /* RGMII TX DLL Tune Adjust */
2030         lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2031
2032         dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2033
2034         return 1;
2035 }
2036
2037 static int ksz9031rnx_fixup(struct phy_device *phydev)
2038 {
2039         struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2040
2041         /* Micrel KSZ9031RNX PHY configuration */
2042         /* RGMII Control Signal Pad Skew */
2043         phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2044         /* RGMII RX Data Pad Skew */
2045         phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2046         /* RGMII RX Clock Pad Skew */
2047         phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2048
2049         dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2050
2051         return 1;
2052 }
2053
2054 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2055 {
2056         u32 buf;
2057         int ret;
2058         struct fixed_phy_status fphy_status = {
2059                 .link = 1,
2060                 .speed = SPEED_1000,
2061                 .duplex = DUPLEX_FULL,
2062         };
2063         struct phy_device *phydev;
2064
2065         phydev = phy_find_first(dev->mdiobus);
2066         if (!phydev) {
2067                 netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2068                 phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2069                 if (IS_ERR(phydev)) {
2070                         netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2071                         return NULL;
2072                 }
2073                 netdev_dbg(dev->net, "Registered FIXED PHY\n");
2074                 dev->interface = PHY_INTERFACE_MODE_RGMII;
2075                 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2076                                         MAC_RGMII_ID_TXC_DELAY_EN_);
2077                 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2078                 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2079                 buf |= HW_CFG_CLK125_EN_;
2080                 buf |= HW_CFG_REFCLK25_EN_;
2081                 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2082         } else {
2083                 if (!phydev->drv) {
2084                         netdev_err(dev->net, "no PHY driver found\n");
2085                         return NULL;
2086                 }
2087                 dev->interface = PHY_INTERFACE_MODE_RGMII;
2088                 /* external PHY fixup for KSZ9031RNX */
2089                 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2090                                                  ksz9031rnx_fixup);
2091                 if (ret < 0) {
2092                         netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2093                         return NULL;
2094                 }
2095                 /* external PHY fixup for LAN8835 */
2096                 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2097                                                  lan8835_fixup);
2098                 if (ret < 0) {
2099                         netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2100                         return NULL;
2101                 }
2102                 /* add more external PHY fixup here if needed */
2103
2104                 phydev->is_internal = false;
2105         }
2106         return phydev;
2107 }
2108
2109 static int lan78xx_phy_init(struct lan78xx_net *dev)
2110 {
2111         __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2112         int ret;
2113         u32 mii_adv;
2114         struct phy_device *phydev;
2115
2116         switch (dev->chipid) {
2117         case ID_REV_CHIP_ID_7801_:
2118                 phydev = lan7801_phy_init(dev);
2119                 if (!phydev) {
2120                         netdev_err(dev->net, "lan7801: PHY Init Failed");
2121                         return -EIO;
2122                 }
2123                 break;
2124
2125         case ID_REV_CHIP_ID_7800_:
2126         case ID_REV_CHIP_ID_7850_:
2127                 phydev = phy_find_first(dev->mdiobus);
2128                 if (!phydev) {
2129                         netdev_err(dev->net, "no PHY found\n");
2130                         return -EIO;
2131                 }
2132                 phydev->is_internal = true;
2133                 dev->interface = PHY_INTERFACE_MODE_GMII;
2134                 break;
2135
2136         default:
2137                 netdev_err(dev->net, "Unknown CHIP ID found\n");
2138                 return -EIO;
2139         }
2140
2141         /* if phyirq is not set, use polling mode in phylib */
2142         if (dev->domain_data.phyirq > 0)
2143                 phydev->irq = dev->domain_data.phyirq;
2144         else
2145                 phydev->irq = PHY_POLL;
2146         netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2147
2148         /* set to AUTOMDIX */
2149         phydev->mdix = ETH_TP_MDI_AUTO;
2150
2151         ret = phy_connect_direct(dev->net, phydev,
2152                                  lan78xx_link_status_change,
2153                                  dev->interface);
2154         if (ret) {
2155                 netdev_err(dev->net, "can't attach PHY to %s\n",
2156                            dev->mdiobus->id);
2157                 if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2158                         if (phy_is_pseudo_fixed_link(phydev)) {
2159                                 fixed_phy_unregister(phydev);
2160                         } else {
2161                                 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2162                                                              0xfffffff0);
2163                                 phy_unregister_fixup_for_uid(PHY_LAN8835,
2164                                                              0xfffffff0);
2165                         }
2166                 }
2167                 return -EIO;
2168         }
2169
2170         /* MAC doesn't support 1000T Half */
2171         phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2172
2173         /* support both flow controls */
2174         dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2175         linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2176                            phydev->advertising);
2177         linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2178                            phydev->advertising);
2179         mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2180         mii_adv_to_linkmode_adv_t(fc, mii_adv);
2181         linkmode_or(phydev->advertising, fc, phydev->advertising);
2182
2183         if (phydev->mdio.dev.of_node) {
2184                 u32 reg;
2185                 int len;
2186
2187                 len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2188                                                       "microchip,led-modes",
2189                                                       sizeof(u32));
2190                 if (len >= 0) {
2191                         /* Ensure the appropriate LEDs are enabled */
2192                         lan78xx_read_reg(dev, HW_CFG, &reg);
2193                         reg &= ~(HW_CFG_LED0_EN_ |
2194                                  HW_CFG_LED1_EN_ |
2195                                  HW_CFG_LED2_EN_ |
2196                                  HW_CFG_LED3_EN_);
2197                         reg |= (len > 0) * HW_CFG_LED0_EN_ |
2198                                 (len > 1) * HW_CFG_LED1_EN_ |
2199                                 (len > 2) * HW_CFG_LED2_EN_ |
2200                                 (len > 3) * HW_CFG_LED3_EN_;
2201                         lan78xx_write_reg(dev, HW_CFG, reg);
2202                 }
2203         }
2204
2205         genphy_config_aneg(phydev);
2206
2207         dev->fc_autoneg = phydev->autoneg;
2208
2209         return 0;
2210 }
2211
2212 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2213 {
2214         u32 buf;
2215         bool rxenabled;
2216
2217         lan78xx_read_reg(dev, MAC_RX, &buf);
2218
2219         rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2220
2221         if (rxenabled) {
2222                 buf &= ~MAC_RX_RXEN_;
2223                 lan78xx_write_reg(dev, MAC_RX, buf);
2224         }
2225
2226         /* add 4 to size for FCS */
2227         buf &= ~MAC_RX_MAX_SIZE_MASK_;
2228         buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2229
2230         lan78xx_write_reg(dev, MAC_RX, buf);
2231
2232         if (rxenabled) {
2233                 buf |= MAC_RX_RXEN_;
2234                 lan78xx_write_reg(dev, MAC_RX, buf);
2235         }
2236
2237         return 0;
2238 }
2239
2240 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2241 {
2242         struct sk_buff *skb;
2243         unsigned long flags;
2244         int count = 0;
2245
2246         spin_lock_irqsave(&q->lock, flags);
2247         while (!skb_queue_empty(q)) {
2248                 struct skb_data *entry;
2249                 struct urb *urb;
2250                 int ret;
2251
2252                 skb_queue_walk(q, skb) {
2253                         entry = (struct skb_data *)skb->cb;
2254                         if (entry->state != unlink_start)
2255                                 goto found;
2256                 }
2257                 break;
2258 found:
2259                 entry->state = unlink_start;
2260                 urb = entry->urb;
2261
2262                 /* Get reference count of the URB to avoid it to be
2263                  * freed during usb_unlink_urb, which may trigger
2264                  * use-after-free problem inside usb_unlink_urb since
2265                  * usb_unlink_urb is always racing with .complete
2266                  * handler(include defer_bh).
2267                  */
2268                 usb_get_urb(urb);
2269                 spin_unlock_irqrestore(&q->lock, flags);
2270                 /* during some PM-driven resume scenarios,
2271                  * these (async) unlinks complete immediately
2272                  */
2273                 ret = usb_unlink_urb(urb);
2274                 if (ret != -EINPROGRESS && ret != 0)
2275                         netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2276                 else
2277                         count++;
2278                 usb_put_urb(urb);
2279                 spin_lock_irqsave(&q->lock, flags);
2280         }
2281         spin_unlock_irqrestore(&q->lock, flags);
2282         return count;
2283 }
2284
2285 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2286 {
2287         struct lan78xx_net *dev = netdev_priv(netdev);
2288         int ll_mtu = new_mtu + netdev->hard_header_len;
2289         int old_hard_mtu = dev->hard_mtu;
2290         int old_rx_urb_size = dev->rx_urb_size;
2291         int ret;
2292
2293         /* no second zero-length packet read wanted after mtu-sized packets */
2294         if ((ll_mtu % dev->maxpacket) == 0)
2295                 return -EDOM;
2296
2297         ret = usb_autopm_get_interface(dev->intf);
2298         if (ret < 0)
2299                 return ret;
2300
2301         lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2302
2303         netdev->mtu = new_mtu;
2304
2305         dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2306         if (dev->rx_urb_size == old_hard_mtu) {
2307                 dev->rx_urb_size = dev->hard_mtu;
2308                 if (dev->rx_urb_size > old_rx_urb_size) {
2309                         if (netif_running(dev->net)) {
2310                                 unlink_urbs(dev, &dev->rxq);
2311                                 tasklet_schedule(&dev->bh);
2312                         }
2313                 }
2314         }
2315
2316         usb_autopm_put_interface(dev->intf);
2317
2318         return 0;
2319 }
2320
2321 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2322 {
2323         struct lan78xx_net *dev = netdev_priv(netdev);
2324         struct sockaddr *addr = p;
2325         u32 addr_lo, addr_hi;
2326
2327         if (netif_running(netdev))
2328                 return -EBUSY;
2329
2330         if (!is_valid_ether_addr(addr->sa_data))
2331                 return -EADDRNOTAVAIL;
2332
2333         ether_addr_copy(netdev->dev_addr, addr->sa_data);
2334
2335         addr_lo = netdev->dev_addr[0] |
2336                   netdev->dev_addr[1] << 8 |
2337                   netdev->dev_addr[2] << 16 |
2338                   netdev->dev_addr[3] << 24;
2339         addr_hi = netdev->dev_addr[4] |
2340                   netdev->dev_addr[5] << 8;
2341
2342         lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2343         lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2344
2345         /* Added to support MAC address changes */
2346         lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2347         lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2348
2349         return 0;
2350 }
2351
2352 /* Enable or disable Rx checksum offload engine and VLAN stripping/filtering */
2353 static int lan78xx_set_features(struct net_device *netdev,
2354                                 netdev_features_t features)
2355 {
2356         struct lan78xx_net *dev = netdev_priv(netdev);
2357         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2358         unsigned long flags;
2359
2360         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2361
2362         if (features & NETIF_F_RXCSUM) {
2363                 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2364                 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2365         } else {
2366                 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2367                 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2368         }
2369
2370         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2371                 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2372         else
2373                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2374
2375         if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2376                 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2377         else
2378                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2379
2380         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2381
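        /* The spinlock only guards the cached pdata->rfe_ctl value; the
         * register write itself is a USB transfer that may sleep, so it is
         * issued after the lock is dropped.
         */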
2382         lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2383
2384         return 0;
2385 }
2386
2387 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2388 {
2389         struct lan78xx_priv *pdata =
2390                         container_of(param, struct lan78xx_priv, set_vlan);
2391         struct lan78xx_net *dev = pdata->dev;
2392
2393         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2394                                DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2395 }
2396
2397 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2398                                    __be16 proto, u16 vid)
2399 {
2400         struct lan78xx_net *dev = netdev_priv(netdev);
2401         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2402         u16 vid_bit_index;
2403         u16 vid_dword_index;
2404
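        /* The 4096 possible VIDs are tracked in a bitmap of 128 32-bit
         * words: VID bits 5-11 select the word, bits 0-4 select the bit.
         * For example, VID 100 maps to word 3, bit 4.
         */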
2405         vid_dword_index = (vid >> 5) & 0x7F;
2406         vid_bit_index = vid & 0x1F;
2407
2408         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2409
2410         /* defer register writes to a sleepable context */
2411         schedule_work(&pdata->set_vlan);
2412
2413         return 0;
2414 }
2415
2416 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2417                                     __be16 proto, u16 vid)
2418 {
2419         struct lan78xx_net *dev = netdev_priv(netdev);
2420         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2421         u16 vid_bit_index;
2422         u16 vid_dword_index;
2423
2424         vid_dword_index = (vid >> 5) & 0x7F;
2425         vid_bit_index = vid & 0x1F;
2426
2427         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2428
2429         /* defer register writes to a sleepable context */
2430         schedule_work(&pdata->set_vlan);
2431
2432         return 0;
2433 }
2434
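/* Program the USB LTM (Latency Tolerance Messaging) registers.  When LTM
 * is enabled in USB_CFG1 and a valid 24-byte block is found in EEPROM or
 * OTP, those values are used; otherwise all six registers are written as
 * zero.
 */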
2435 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2436 {
2437         int ret;
2438         u32 buf;
2439         u32 regs[6] = { 0 };
2440
2441         ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2442         if (buf & USB_CFG1_LTM_ENABLE_) {
2443                 u8 temp[2];
2444                 /* Get values from EEPROM first */
2445                 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2446                         if (temp[0] == 24) {
2447                                 ret = lan78xx_read_raw_eeprom(dev,
2448                                                               temp[1] * 2,
2449                                                               24,
2450                                                               (u8 *)regs);
2451                                 if (ret < 0)
2452                                         return;
2453                         }
2454                 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2455                         if (temp[0] == 24) {
2456                                 ret = lan78xx_read_raw_otp(dev,
2457                                                            temp[1] * 2,
2458                                                            24,
2459                                                            (u8 *)regs);
2460                                 if (ret < 0)
2461                                         return;
2462                         }
2463                 }
2464         }
2465
2466         lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2467         lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2468         lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2469         lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2470         lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2471         lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2472 }
2473
2474 static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
2475 {
2476         return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
2477 }
2478
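/* Clear the enable bit(s) in the given register, then poll until the
 * corresponding disabled status bit(s) are set, giving up after
 * HW_DISABLE_TIMEOUT.  Returns 0 once the block has stopped, a negative
 * register-access error, or -ETIME on timeout.
 */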
2479 static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
2480                            u32 hw_disabled)
2481 {
2482         unsigned long timeout;
2483         bool stopped = true;
2484         int ret;
2485         u32 buf;
2486
2487         /* Stop the h/w block (if not already stopped) */
2488
2489         ret = lan78xx_read_reg(dev, reg, &buf);
2490         if (ret < 0)
2491                 return ret;
2492
2493         if (buf & hw_enabled) {
2494                 buf &= ~hw_enabled;
2495
2496                 ret = lan78xx_write_reg(dev, reg, buf);
2497                 if (ret < 0)
2498                         return ret;
2499
2500                 stopped = false;
2501                 timeout = jiffies + HW_DISABLE_TIMEOUT;
2502                 do  {
2503                         ret = lan78xx_read_reg(dev, reg, &buf);
2504                         if (ret < 0)
2505                                 return ret;
2506
2507                         if (buf & hw_disabled)
2508                                 stopped = true;
2509                         else
2510                                 msleep(HW_DISABLE_DELAY_MS);
2511                 } while (!stopped && !time_after(jiffies, timeout));
2512         }
2513
2514         ret = stopped ? 0 : -ETIME;
2515
2516         return ret;
2517 }
2518
2519 static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
2520 {
2521         return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
2522 }
2523
2524 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
2525 {
2526         int ret;
2527
2528         netif_dbg(dev, drv, dev->net, "start tx path");
2529
2530         /* Start the MAC transmitter */
2531
2532         ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
2533         if (ret < 0)
2534                 return ret;
2535
2536         /* Start the Tx FIFO */
2537
2538         ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
2539         if (ret < 0)
2540                 return ret;
2541
2542         return 0;
2543 }
2544
2545 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
2546 {
2547         int ret;
2548
2549         netif_dbg(dev, drv, dev->net, "stop tx path");
2550
2551         /* Stop the Tx FIFO */
2552
2553         ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
2554         if (ret < 0)
2555                 return ret;
2556
2557         /* Stop the MAC transmitter */
2558
2559         ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
2560         if (ret < 0)
2561                 return ret;
2562
2563         return 0;
2564 }
2565
2566 /* The caller must ensure the Tx path is stopped before calling
2567  * lan78xx_flush_tx_fifo().
2568  */
2569 static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
2570 {
2571         return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
2572 }
2573
2574 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
2575 {
2576         int ret;
2577
2578         netif_dbg(dev, drv, dev->net, "start rx path");
2579
2580         /* Start the Rx FIFO */
2581
2582         ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
2583         if (ret < 0)
2584                 return ret;
2585
2586         /* Start the MAC receiver */
2587
2588         ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
2589         if (ret < 0)
2590                 return ret;
2591
2592         return 0;
2593 }
2594
2595 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
2596 {
2597         int ret;
2598
2599         netif_dbg(dev, drv, dev->net, "stop rx path");
2600
2601         /* Stop the MAC receiver */
2602
2603         ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
2604         if (ret < 0)
2605                 return ret;
2606
2607         /* Stop the Rx FIFO */
2608
2609         ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
2610         if (ret < 0)
2611                 return ret;
2612
2613         return 0;
2614 }
2615
2616 /* The caller must ensure the Rx path is stopped before calling
2617  * lan78xx_flush_rx_fifo().
2618  */
2619 static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
2620 {
2621         return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
2622 }
2623
2624 static int lan78xx_reset(struct lan78xx_net *dev)
2625 {
2626         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2627         unsigned long timeout;
2628         int ret;
2629         u32 buf;
2630         u8 sig;
2631
2632         ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2633         if (ret < 0)
2634                 return ret;
2635
2636         buf |= HW_CFG_LRST_;
2637
2638         ret = lan78xx_write_reg(dev, HW_CFG, buf);
2639         if (ret < 0)
2640                 return ret;
2641
2642         timeout = jiffies + HZ;
2643         do {
2644                 mdelay(1);
2645                 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2646                 if (ret < 0)
2647                         return ret;
2648
2649                 if (time_after(jiffies, timeout)) {
2650                         netdev_warn(dev->net,
2651                                     "timeout on completion of LiteReset");
2652                         ret = -ETIMEDOUT;
2653                         return ret;
2654                 }
2655         } while (buf & HW_CFG_LRST_);
2656
2657         lan78xx_init_mac_address(dev);
2658
2659         /* save DEVID for later usage */
2660         ret = lan78xx_read_reg(dev, ID_REV, &buf);
2661         if (ret < 0)
2662                 return ret;
2663
2664         dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2665         dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2666
2667         /* Respond to the IN token with a NAK */
2668         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2669         if (ret < 0)
2670                 return ret;
2671
2672         buf |= USB_CFG_BIR_;
2673
2674         ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2675         if (ret < 0)
2676                 return ret;
2677
2678         /* Init LTM */
2679         lan78xx_init_ltm(dev);
2680
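        /* BURST_CAP is programmed in units of the bulk-in packet size for
         * the negotiated USB speed, so the same burst byte budget becomes
         * a different packet count in each branch below.
         */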
2681         if (dev->udev->speed == USB_SPEED_SUPER) {
2682                 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2683                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2684                 dev->rx_qlen = 4;
2685                 dev->tx_qlen = 4;
2686         } else if (dev->udev->speed == USB_SPEED_HIGH) {
2687                 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2688                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2689                 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2690                 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2691         } else {
2692                 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2693                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2694                 dev->rx_qlen = 4;
2695                 dev->tx_qlen = 4;
2696         }
2697
2698         ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2699         if (ret < 0)
2700                 return ret;
2701
2702         ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2703         if (ret < 0)
2704                 return ret;
2705
2706         ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2707         if (ret < 0)
2708                 return ret;
2709
2710         buf |= HW_CFG_MEF_;
2711
2712         ret = lan78xx_write_reg(dev, HW_CFG, buf);
2713         if (ret < 0)
2714                 return ret;
2715
2716         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2717         if (ret < 0)
2718                 return ret;
2719
2720         buf |= USB_CFG_BCE_;
2721
2722         ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2723         if (ret < 0)
2724                 return ret;
2725
2726         /* set FIFO sizes */
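        /* The FCT_*_FIFO_END registers take the FIFO end address in
         * 512-byte units, i.e. (FIFO size / 512) - 1.
         */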
2727         buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2728
2729         ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2730         if (ret < 0)
2731                 return ret;
2732
2733         buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2734
2735         ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2736         if (ret < 0)
2737                 return ret;
2738
2739         ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2740         if (ret < 0)
2741                 return ret;
2742
2743         ret = lan78xx_write_reg(dev, FLOW, 0);
2744         if (ret < 0)
2745                 return ret;
2746
2747         ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2748         if (ret < 0)
2749                 return ret;
2750
2751         /* Don't need rfe_ctl_lock during initialisation */
2752         ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2753         if (ret < 0)
2754                 return ret;
2755
2756         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2757
2758         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2759         if (ret < 0)
2760                 return ret;
2761
2762         /* Enable or disable checksum offload engines */
2763         ret = lan78xx_set_features(dev->net, dev->net->features);
2764         if (ret < 0)
2765                 return ret;
2766
2767         lan78xx_set_multicast(dev->net);
2768
2769         /* reset PHY */
2770         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2771         if (ret < 0)
2772                 return ret;
2773
2774         buf |= PMT_CTL_PHY_RST_;
2775
2776         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2777         if (ret < 0)
2778                 return ret;
2779
2780         timeout = jiffies + HZ;
2781         do {
2782                 mdelay(1);
2783                 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2784                 if (ret < 0)
2785                         return ret;
2786
2787                 if (time_after(jiffies, timeout)) {
2788                         netdev_warn(dev->net, "timeout waiting for PHY Reset");
2789                         ret = -ETIMEDOUT;
2790                         return ret;
2791                 }
2792         } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2793
2794         ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2795         if (ret < 0)
2796                 return ret;
2797
2798         /* LAN7801 only has RGMII mode */
2799         if (dev->chipid == ID_REV_CHIP_ID_7801_)
2800                 buf &= ~MAC_CR_GMII_EN_;
2801
2802         if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
2803             dev->chipid == ID_REV_CHIP_ID_7850_) {
2804                 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2805                 if (!ret && sig != EEPROM_INDICATOR) {
2806                         /* Implies there is no external EEPROM; set MAC speed */
2807                         netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2808                         buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2809                 }
2810         }
2811         ret = lan78xx_write_reg(dev, MAC_CR, buf);
2812         if (ret < 0)
2813                 return ret;
2814
2815         ret = lan78xx_set_rx_max_frame_length(dev,
2816                                               dev->net->mtu + VLAN_ETH_HLEN);
2817
2818         return ret;
2819 }
2820
2821 static void lan78xx_init_stats(struct lan78xx_net *dev)
2822 {
2823         u32 *p;
2824         int i;
2825
2826         /* initialize for stats update
2827          * some counters are 20 bits and some are 32 bits
2828          */
2829         p = (u32 *)&dev->stats.rollover_max;
2830         for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2831                 p[i] = 0xFFFFF;
2832
2833         dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2834         dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2835         dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2836         dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2837         dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2838         dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2839         dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2840         dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2841         dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2842         dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2843
2844         set_bit(EVENT_STAT_UPDATE, &dev->flags);
2845 }
2846
2847 static int lan78xx_open(struct net_device *net)
2848 {
2849         struct lan78xx_net *dev = netdev_priv(net);
2850         int ret;
2851
2852         netif_dbg(dev, ifup, dev->net, "open device");
2853
2854         ret = usb_autopm_get_interface(dev->intf);
2855         if (ret < 0)
2856                 return ret;
2857
2858         mutex_lock(&dev->dev_mutex);
2859
2860         phy_start(net->phydev);
2861
2862         netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2863
2864         /* for Link Check */
2865         if (dev->urb_intr) {
2866                 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2867                 if (ret < 0) {
2868                         netif_err(dev, ifup, dev->net,
2869                                   "intr submit %d\n", ret);
2870                         goto done;
2871                 }
2872         }
2873
2874         ret = lan78xx_flush_rx_fifo(dev);
2875         if (ret < 0)
2876                 goto done;
2877         ret = lan78xx_flush_tx_fifo(dev);
2878         if (ret < 0)
2879                 goto done;
2880
2881         ret = lan78xx_start_tx_path(dev);
2882         if (ret < 0)
2883                 goto done;
2884         ret = lan78xx_start_rx_path(dev);
2885         if (ret < 0)
2886                 goto done;
2887
2888         lan78xx_init_stats(dev);
2889
2890         set_bit(EVENT_DEV_OPEN, &dev->flags);
2891
2892         netif_start_queue(net);
2893
2894         dev->link_on = false;
2895
2896         lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2897 done:
2898         mutex_unlock(&dev->dev_mutex);
2899
2900         if (ret < 0)
2901                 usb_autopm_put_interface(dev->intf);
2902
2903         return ret;
2904 }
2905
2906 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2907 {
2908         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2909         DECLARE_WAITQUEUE(wait, current);
2910         int temp;
2911
2912         /* ensure there are no more active urbs */
2913         add_wait_queue(&unlink_wakeup, &wait);
2914         set_current_state(TASK_UNINTERRUPTIBLE);
2915         dev->wait = &unlink_wakeup;
2916         temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2917
2918         /* maybe wait for deletions to finish. */
2919         while (!skb_queue_empty(&dev->rxq) ||
2920                !skb_queue_empty(&dev->txq)) {
2921                 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2922                 set_current_state(TASK_UNINTERRUPTIBLE);
2923                 netif_dbg(dev, ifdown, dev->net,
2924                           "waited for %d urb completions", temp);
2925         }
2926         set_current_state(TASK_RUNNING);
2927         dev->wait = NULL;
2928         remove_wait_queue(&unlink_wakeup, &wait);
2929
2930         while (!skb_queue_empty(&dev->done)) {
2931                 struct skb_data *entry;
2932                 struct sk_buff *skb;
2933
2934                 skb = skb_dequeue(&dev->done);
2935                 entry = (struct skb_data *)(skb->cb);
2936                 usb_free_urb(entry->urb);
2937                 dev_kfree_skb(skb);
2938         }
2939 }
2940
2941 static int lan78xx_stop(struct net_device *net)
2942 {
2943         struct lan78xx_net *dev = netdev_priv(net);
2944
2945         netif_dbg(dev, ifup, dev->net, "stop device");
2946
2947         mutex_lock(&dev->dev_mutex);
2948
2949         if (timer_pending(&dev->stat_monitor))
2950                 del_timer_sync(&dev->stat_monitor);
2951
2952         clear_bit(EVENT_DEV_OPEN, &dev->flags);
2953         netif_stop_queue(net);
2954         tasklet_kill(&dev->bh);
2955
2956         lan78xx_terminate_urbs(dev);
2957
2958         netif_info(dev, ifdown, dev->net,
2959                    "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2960                    net->stats.rx_packets, net->stats.tx_packets,
2961                    net->stats.rx_errors, net->stats.tx_errors);
2962
2963         /* ignore errors that occur stopping the Tx and Rx data paths */
2964         lan78xx_stop_tx_path(dev);
2965         lan78xx_stop_rx_path(dev);
2966
2967         if (net->phydev)
2968                 phy_stop(net->phydev);
2969
2970         usb_kill_urb(dev->urb_intr);
2971
2972         skb_queue_purge(&dev->rxq_pause);
2973
2974         /* deferred work (task, timer, softirq) must also stop.
2975          * can't flush_scheduled_work() until we drop rtnl (later),
2976          * else workers could deadlock; so make workers a NOP.
2977          */
2978         clear_bit(EVENT_TX_HALT, &dev->flags);
2979         clear_bit(EVENT_RX_HALT, &dev->flags);
2980         clear_bit(EVENT_LINK_RESET, &dev->flags);
2981         clear_bit(EVENT_STAT_UPDATE, &dev->flags);
2982
2983         cancel_delayed_work_sync(&dev->wq);
2984
2985         usb_autopm_put_interface(dev->intf);
2986
2987         mutex_unlock(&dev->dev_mutex);
2988
2989         return 0;
2990 }
2991
2992 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2993                                        struct sk_buff *skb, gfp_t flags)
2994 {
2995         u32 tx_cmd_a, tx_cmd_b;
2996         void *ptr;
2997
2998         if (skb_cow_head(skb, TX_OVERHEAD)) {
2999                 dev_kfree_skb_any(skb);
3000                 return NULL;
3001         }
3002
3003         if (skb_linearize(skb)) {
3004                 dev_kfree_skb_any(skb);
3005                 return NULL;
3006         }
3007
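        /* Each frame is prefixed with two little-endian 32-bit command
         * words: TX_CMD_A holds the length, FCS and checksum/LSO flags,
         * TX_CMD_B holds the MSS and VLAN tag.  The skb_push(skb, 8)
         * below reserves space for exactly these two words.
         */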
3008         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
3009
3010         if (skb->ip_summed == CHECKSUM_PARTIAL)
3011                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
3012
3013         tx_cmd_b = 0;
3014         if (skb_is_gso(skb)) {
3015                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
3016
3017                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
3018
3019                 tx_cmd_a |= TX_CMD_A_LSO_;
3020         }
3021
3022         if (skb_vlan_tag_present(skb)) {
3023                 tx_cmd_a |= TX_CMD_A_IVTG_;
3024                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
3025         }
3026
3027         ptr = skb_push(skb, 8);
3028         put_unaligned_le32(tx_cmd_a, ptr);
3029         put_unaligned_le32(tx_cmd_b, ptr + 4);
3030
3031         return skb;
3032 }
3033
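/* Move the skb from its current Rx/Tx queue to the done list and record
 * its new state.  The bottom-half tasklet is scheduled only when the done
 * list goes from empty to non-empty.  Returns the skb's previous state.
 */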
3034 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
3035                                struct sk_buff_head *list, enum skb_state state)
3036 {
3037         unsigned long flags;
3038         enum skb_state old_state;
3039         struct skb_data *entry = (struct skb_data *)skb->cb;
3040
3041         spin_lock_irqsave(&list->lock, flags);
3042         old_state = entry->state;
3043         entry->state = state;
3044
3045         __skb_unlink(skb, list);
3046         spin_unlock(&list->lock);
3047         spin_lock(&dev->done.lock);
3048
3049         __skb_queue_tail(&dev->done, skb);
3050         if (skb_queue_len(&dev->done) == 1)
3051                 tasklet_schedule(&dev->bh);
3052         spin_unlock_irqrestore(&dev->done.lock, flags);
3053
3054         return old_state;
3055 }
3056
3057 static void tx_complete(struct urb *urb)
3058 {
3059         struct sk_buff *skb = (struct sk_buff *)urb->context;
3060         struct skb_data *entry = (struct skb_data *)skb->cb;
3061         struct lan78xx_net *dev = entry->dev;
3062
3063         if (urb->status == 0) {
3064                 dev->net->stats.tx_packets += entry->num_of_packet;
3065                 dev->net->stats.tx_bytes += entry->length;
3066         } else {
3067                 dev->net->stats.tx_errors++;
3068
3069                 switch (urb->status) {
3070                 case -EPIPE:
3071                         lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3072                         break;
3073
3074                 /* software-driven interface shutdown */
3075                 case -ECONNRESET:
3076                 case -ESHUTDOWN:
3077                         break;
3078
3079                 case -EPROTO:
3080                 case -ETIME:
3081                 case -EILSEQ:
3082                         netif_stop_queue(dev->net);
3083                         break;
3084                 default:
3085                         netif_dbg(dev, tx_err, dev->net,
3086                                   "tx err %d\n", entry->urb->status);
3087                         break;
3088                 }
3089         }
3090
3091         usb_autopm_put_interface_async(dev->intf);
3092
3093         defer_bh(dev, skb, &dev->txq, tx_done);
3094 }
3095
3096 static void lan78xx_queue_skb(struct sk_buff_head *list,
3097                               struct sk_buff *newsk, enum skb_state state)
3098 {
3099         struct skb_data *entry = (struct skb_data *)newsk->cb;
3100
3101         __skb_queue_tail(list, newsk);
3102         entry->state = state;
3103 }
3104
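/* ndo_start_xmit: prepend the TX command header and park the skb on
 * txq_pend; the actual USB submission happens in lan78xx_tx_bh().  On
 * sub-SuperSpeed links the queue is stopped once too many frames pend.
 */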
3105 static netdev_tx_t
3106 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
3107 {
3108         struct lan78xx_net *dev = netdev_priv(net);
3109         struct sk_buff *skb2 = NULL;
3110
3111         if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
3112                 schedule_delayed_work(&dev->wq, 0);
3113
3114         if (skb) {
3115                 skb_tx_timestamp(skb);
3116                 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
3117         }
3118
3119         if (skb2) {
3120                 skb_queue_tail(&dev->txq_pend, skb2);
3121
3122                 /* throttle the TX path at speeds below SuperSpeed USB */
3123                 if ((dev->udev->speed < USB_SPEED_SUPER) &&
3124                     (skb_queue_len(&dev->txq_pend) > 10))
3125                         netif_stop_queue(net);
3126         } else {
3127                 netif_dbg(dev, tx_err, dev->net,
3128                           "lan78xx_tx_prep returned NULL\n");
3129                 dev->net->stats.tx_errors++;
3130                 dev->net->stats.tx_dropped++;
3131         }
3132
3133         tasklet_schedule(&dev->bh);
3134
3135         return NETDEV_TX_OK;
3136 }
3137
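/* Bind-time setup: allocate the driver private data, initialise the
 * deferred multicast/VLAN writers, choose the default offload features,
 * create the PHY interrupt domain and initialise the chip and MDIO bus.
 */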
3138 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3139 {
3140         struct lan78xx_priv *pdata = NULL;
3141         int ret;
3142         int i;
3143
3144         dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3145
3146         pdata = (struct lan78xx_priv *)(dev->data[0]);
3147         if (!pdata) {
3148                 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3149                 return -ENOMEM;
3150         }
3151
3152         pdata->dev = dev;
3153
3154         spin_lock_init(&pdata->rfe_ctl_lock);
3155         mutex_init(&pdata->dataport_mutex);
3156
3157         INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3158
3159         for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3160                 pdata->vlan_table[i] = 0;
3161
3162         INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3163
3164         dev->net->features = 0;
3165
3166         if (DEFAULT_TX_CSUM_ENABLE)
3167                 dev->net->features |= NETIF_F_HW_CSUM;
3168
3169         if (DEFAULT_RX_CSUM_ENABLE)
3170                 dev->net->features |= NETIF_F_RXCSUM;
3171
3172         if (DEFAULT_TSO_CSUM_ENABLE)
3173                 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3174
3175         if (DEFAULT_VLAN_RX_OFFLOAD)
3176                 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3177
3178         if (DEFAULT_VLAN_FILTER_ENABLE)
3179                 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3180
3181         dev->net->hw_features = dev->net->features;
3182
3183         ret = lan78xx_setup_irq_domain(dev);
3184         if (ret < 0) {
3185                 netdev_warn(dev->net,
3186                             "lan78xx_setup_irq_domain() failed: %d\n", ret);
3187                 goto out1;
3188         }
3189
3190         dev->net->hard_header_len += TX_OVERHEAD;
3191         dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
3192
3193         /* Init all registers */
3194         ret = lan78xx_reset(dev);
3195         if (ret) {
3196                 netdev_warn(dev->net, "register init failed: %d\n", ret);
3197                 goto out2;
3198         }
3199
3200         ret = lan78xx_mdio_init(dev);
3201         if (ret) {
3202                 netdev_warn(dev->net, "MDIO init failed: %d\n", ret);
3203                 goto out2;
3204         }
3205
3206         dev->net->flags |= IFF_MULTICAST;
3207
3208         pdata->wol = WAKE_MAGIC;
3209
3210         return ret;
3211
3212 out2:
3213         lan78xx_remove_irq_domain(dev);
3214
3215 out1:
3216         netdev_warn(dev->net, "bind routine failed: %d\n", ret);
3217         cancel_work_sync(&pdata->set_multicast);
3218         cancel_work_sync(&pdata->set_vlan);
3219         kfree(pdata);
3220         return ret;
3221 }
3222
3223 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3224 {
3225         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3226
3227         lan78xx_remove_irq_domain(dev);
3228
3229         lan78xx_remove_mdio(dev);
3230
3231         if (pdata) {
3232                 cancel_work_sync(&pdata->set_multicast);
3233                 cancel_work_sync(&pdata->set_vlan);
3234                 netif_dbg(dev, ifdown, dev->net, "free pdata");
3235                 kfree(pdata);
3236                 pdata = NULL;
3237                 dev->data[0] = 0;
3238         }
3239 }
3240
3241 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3242                                     struct sk_buff *skb,
3243                                     u32 rx_cmd_a, u32 rx_cmd_b)
3244 {
3245         /* HW Checksum offload appears to be flawed if used when not stripping
3246          * VLAN headers. Drop back to S/W checksums under these conditions.
3247          */
3248         if (!(dev->net->features & NETIF_F_RXCSUM) ||
3249             unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3250             ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3251              !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3252                 skb->ip_summed = CHECKSUM_NONE;
3253         } else {
3254                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3255                 skb->ip_summed = CHECKSUM_COMPLETE;
3256         }
3257 }
3258
3259 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3260                                     struct sk_buff *skb,
3261                                     u32 rx_cmd_a, u32 rx_cmd_b)
3262 {
3263         if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3264             (rx_cmd_a & RX_CMD_A_FVTG_))
3265                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3266                                        (rx_cmd_b & 0xffff));
3267 }
3268
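/* Hand a received frame up to the network stack and update the RX
 * counters, or park it on rxq_pause while RX is paused.
 */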
3269 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3270 {
3271         int status;
3272
3273         if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
3274                 skb_queue_tail(&dev->rxq_pause, skb);
3275                 return;
3276         }
3277
3278         dev->net->stats.rx_packets++;
3279         dev->net->stats.rx_bytes += skb->len;
3280
3281         skb->protocol = eth_type_trans(skb, dev->net);
3282
3283         netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3284                   skb->len + sizeof(struct ethhdr), skb->protocol);
3285         memset(skb->cb, 0, sizeof(struct skb_data));
3286
3287         if (skb_defer_rx_timestamp(skb))
3288                 return;
3289
3290         status = netif_rx(skb);
3291         if (status != NET_RX_SUCCESS)
3292                 netif_dbg(dev, rx_err, dev->net,
3293                           "netif_rx status %d\n", status);
3294 }
3295
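/* Parse one bulk-in buffer: every frame is preceded by the RX command
 * words (rx_cmd_a/b/c).  Good frames are cloned out (the last frame is
 * returned in place), the 4-byte FCS is trimmed and checksum/VLAN
 * metadata attached; frames flagged with RX_CMD_A_RED_ are skipped.
 * Returns 0 on a short buffer or clone failure, 1 otherwise.
 */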
3296 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3297 {
3298         if (skb->len < dev->net->hard_header_len)
3299                 return 0;
3300
3301         while (skb->len > 0) {
3302                 u32 rx_cmd_a, rx_cmd_b, align_count, size;
3303                 u16 rx_cmd_c;
3304                 struct sk_buff *skb2;
3305                 unsigned char *packet;
3306
3307                 rx_cmd_a = get_unaligned_le32(skb->data);
3308                 skb_pull(skb, sizeof(rx_cmd_a));
3309
3310                 rx_cmd_b = get_unaligned_le32(skb->data);
3311                 skb_pull(skb, sizeof(rx_cmd_b));
3312
3313                 rx_cmd_c = get_unaligned_le16(skb->data);
3314                 skb_pull(skb, sizeof(rx_cmd_c));
3315
3316                 packet = skb->data;
3317
3318                 /* get the packet length */
3319                 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3320                 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3321
3322                 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3323                         netif_dbg(dev, rx_err, dev->net,
3324                                   "Error rx_cmd_a=0x%08x", rx_cmd_a);
3325                 } else {
3326                         /* last frame in this batch */
3327                         if (skb->len == size) {
3328                                 lan78xx_rx_csum_offload(dev, skb,
3329                                                         rx_cmd_a, rx_cmd_b);
3330                                 lan78xx_rx_vlan_offload(dev, skb,
3331                                                         rx_cmd_a, rx_cmd_b);
3332
3333                                 skb_trim(skb, skb->len - 4); /* remove fcs */
3334                                 skb->truesize = size + sizeof(struct sk_buff);
3335
3336                                 return 1;
3337                         }
3338
3339                         skb2 = skb_clone(skb, GFP_ATOMIC);
3340                         if (unlikely(!skb2)) {
3341                                 netdev_warn(dev->net, "Error allocating skb");
3342                                 return 0;
3343                         }
3344
3345                         skb2->len = size;
3346                         skb2->data = packet;
3347                         skb_set_tail_pointer(skb2, size);
3348
3349                         lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3350                         lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3351
3352                         skb_trim(skb2, skb2->len - 4); /* remove fcs */
3353                         skb2->truesize = size + sizeof(struct sk_buff);
3354
3355                         lan78xx_skb_return(dev, skb2);
3356                 }
3357
3358                 skb_pull(skb, size);
3359
3360                 /* padding bytes before the next frame starts */
3361                 if (skb->len)
3362                         skb_pull(skb, align_count);
3363         }
3364
3365         return 1;
3366 }
3367
3368 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3369 {
3370         if (!lan78xx_rx(dev, skb)) {
3371                 dev->net->stats.rx_errors++;
3372                 goto done;
3373         }
3374
3375         if (skb->len) {
3376                 lan78xx_skb_return(dev, skb);
3377                 return;
3378         }
3379
3380         netif_dbg(dev, rx_err, dev->net, "drop\n");
3381         dev->net->stats.rx_errors++;
3382 done:
3383         skb_queue_tail(&dev->done, skb);
3384 }
3385
3386 static void rx_complete(struct urb *urb);
3387
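/* Allocate an RX skb, bind it to the given bulk-in URB and submit it,
 * unless the interface is down, the endpoint is halted or the device is
 * asleep.  On any failure the skb and URB are freed and an error returned.
 */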
3388 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3389 {
3390         struct sk_buff *skb;
3391         struct skb_data *entry;
3392         unsigned long lockflags;
3393         size_t size = dev->rx_urb_size;
3394         int ret = 0;
3395
3396         skb = netdev_alloc_skb_ip_align(dev->net, size);
3397         if (!skb) {
3398                 usb_free_urb(urb);
3399                 return -ENOMEM;
3400         }
3401
3402         entry = (struct skb_data *)skb->cb;
3403         entry->urb = urb;
3404         entry->dev = dev;
3405         entry->length = 0;
3406
3407         usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3408                           skb->data, size, rx_complete, skb);
3409
3410         spin_lock_irqsave(&dev->rxq.lock, lockflags);
3411
3412         if (netif_device_present(dev->net) &&
3413             netif_running(dev->net) &&
3414             !test_bit(EVENT_RX_HALT, &dev->flags) &&
3415             !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3416                 ret = usb_submit_urb(urb, GFP_ATOMIC);
3417                 switch (ret) {
3418                 case 0:
3419                         lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3420                         break;
3421                 case -EPIPE:
3422                         lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3423                         break;
3424                 case -ENODEV:
3425                         netif_dbg(dev, ifdown, dev->net, "device gone\n");
3426                         netif_device_detach(dev->net);
3427                         break;
3428                 case -EHOSTUNREACH:
3429                         ret = -ENOLINK;
3430                         break;
3431                 default:
3432                         netif_dbg(dev, rx_err, dev->net,
3433                                   "rx submit, %d\n", ret);
3434                         tasklet_schedule(&dev->bh);
3435                 }
3436         } else {
3437                 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3438                 ret = -ENOLINK;
3439         }
3440         spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3441         if (ret) {
3442                 dev_kfree_skb_any(skb);
3443                 usb_free_urb(urb);
3444         }
3445         return ret;
3446 }
3447
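/* Bulk-in URB completion handler: classify the URB status, push the skb
 * to dev->done via defer_bh() and resubmit the URB for the next receive
 * while the interface is running and the endpoint is not halted.
 */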
3448 static void rx_complete(struct urb *urb)
3449 {
3450         struct sk_buff  *skb = (struct sk_buff *)urb->context;
3451         struct skb_data *entry = (struct skb_data *)skb->cb;
3452         struct lan78xx_net *dev = entry->dev;
3453         int urb_status = urb->status;
3454         enum skb_state state;
3455
3456         skb_put(skb, urb->actual_length);
3457         state = rx_done;
3458         entry->urb = NULL;
3459
3460         switch (urb_status) {
3461         case 0:
3462                 if (skb->len < dev->net->hard_header_len) {
3463                         state = rx_cleanup;
3464                         dev->net->stats.rx_errors++;
3465                         dev->net->stats.rx_length_errors++;
3466                         netif_dbg(dev, rx_err, dev->net,
3467                                   "rx length %d\n", skb->len);
3468                 }
3469                 usb_mark_last_busy(dev->udev);
3470                 break;
3471         case -EPIPE:
3472                 dev->net->stats.rx_errors++;
3473                 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3474                 fallthrough;
3475         case -ECONNRESET:                               /* async unlink */
3476         case -ESHUTDOWN:                                /* hardware gone */
3477                 netif_dbg(dev, ifdown, dev->net,
3478                           "rx shutdown, code %d\n", urb_status);
3479                 state = rx_cleanup;
3480                 entry->urb = urb;
3481                 urb = NULL;
3482                 break;
3483         case -EPROTO:
3484         case -ETIME:
3485         case -EILSEQ:
3486                 dev->net->stats.rx_errors++;
3487                 state = rx_cleanup;
3488                 entry->urb = urb;
3489                 urb = NULL;
3490                 break;
3491
3492         /* data overrun ... flush fifo? */
3493         case -EOVERFLOW:
3494                 dev->net->stats.rx_over_errors++;
3495                 fallthrough;
3496
3497         default:
3498                 state = rx_cleanup;
3499                 dev->net->stats.rx_errors++;
3500                 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3501                 break;
3502         }
3503
3504         state = defer_bh(dev, skb, &dev->rxq, state);
3505
3506         if (urb) {
3507                 if (netif_running(dev->net) &&
3508                     !test_bit(EVENT_RX_HALT, &dev->flags) &&
3509                     state != unlink_start) {
3510                         rx_submit(dev, urb, GFP_ATOMIC);
3511                         return;
3512                 }
3513                 usb_free_urb(urb);
3514         }
3515         netif_dbg(dev, rx_err, dev->net, "rx urb not resubmitted\n");
3516 }
3517
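/* TX bottom half: a GSO skb at the head of txq_pend is sent on its own;
 * otherwise pending frames are copied into a single 32-bit aligned buffer
 * and submitted as one bulk-out URB.  While the device is asleep the URB
 * is anchored on dev->deferred and submitted again on resume.
 */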
3518 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3519 {
3520         int length;
3521         struct urb *urb = NULL;
3522         struct skb_data *entry;
3523         unsigned long flags;
3524         struct sk_buff_head *tqp = &dev->txq_pend;
3525         struct sk_buff *skb, *skb2;
3526         int ret;
3527         int count, pos;
3528         int skb_totallen, pkt_cnt;
3529
3530         skb_totallen = 0;
3531         pkt_cnt = 0;
3532         count = 0;
3533         length = 0;
3534         spin_lock_irqsave(&tqp->lock, flags);
3535         skb_queue_walk(tqp, skb) {
3536                 if (skb_is_gso(skb)) {
3537                         if (!skb_queue_is_first(tqp, skb)) {
3538                                 /* handle previous packets first */
3539                                 break;
3540                         }
3541                         count = 1;
3542                         length = skb->len - TX_OVERHEAD;
3543                         __skb_unlink(skb, tqp);
3544                         spin_unlock_irqrestore(&tqp->lock, flags);
3545                         goto gso_skb;
3546                 }
3547
3548                 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3549                         break;
3550                 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3551                 pkt_cnt++;
3552         }
3553         spin_unlock_irqrestore(&tqp->lock, flags);
3554
3555         /* copy to a single skb */
3556         skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3557         if (!skb)
3558                 goto drop;
3559
3560         skb_put(skb, skb_totallen);
3561
3562         for (count = pos = 0; count < pkt_cnt; count++) {
3563                 skb2 = skb_dequeue(tqp);
3564                 if (skb2) {
3565                         length += (skb2->len - TX_OVERHEAD);
3566                         memcpy(skb->data + pos, skb2->data, skb2->len);
3567                         pos += roundup(skb2->len, sizeof(u32));
3568                         dev_kfree_skb(skb2);
3569                 }
3570         }
3571
3572 gso_skb:
3573         urb = usb_alloc_urb(0, GFP_ATOMIC);
3574         if (!urb)
3575                 goto drop;
3576
3577         entry = (struct skb_data *)skb->cb;
3578         entry->urb = urb;
3579         entry->dev = dev;
3580         entry->length = length;
3581         entry->num_of_packet = count;
3582
3583         spin_lock_irqsave(&dev->txq.lock, flags);
3584         ret = usb_autopm_get_interface_async(dev->intf);
3585         if (ret < 0) {
3586                 spin_unlock_irqrestore(&dev->txq.lock, flags);
3587                 goto drop;
3588         }
3589
3590         usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3591                           skb->data, skb->len, tx_complete, skb);
3592
3593         if (length % dev->maxpacket == 0) {
3594                 /* send USB_ZERO_PACKET */
3595                 urb->transfer_flags |= URB_ZERO_PACKET;
3596         }
3597
3598 #ifdef CONFIG_PM
3599         /* if this triggers, the device is still asleep */
3600         if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3601                 /* transmission will be done in resume */
3602                 usb_anchor_urb(urb, &dev->deferred);
3603                 /* no point in processing more packets */
3604                 netif_stop_queue(dev->net);
3605                 usb_put_urb(urb);
3606                 spin_unlock_irqrestore(&dev->txq.lock, flags);
3607                 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3608                 return;
3609         }
3610 #endif
3611
3612         ret = usb_submit_urb(urb, GFP_ATOMIC);
3613         switch (ret) {
3614         case 0:
3615                 netif_trans_update(dev->net);
3616                 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3617                 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3618                         netif_stop_queue(dev->net);
3619                 break;
3620         case -EPIPE:
3621                 netif_stop_queue(dev->net);
3622                 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3623                 usb_autopm_put_interface_async(dev->intf);
3624                 break;
3625         default:
3626                 usb_autopm_put_interface_async(dev->intf);
3627                 netif_dbg(dev, tx_err, dev->net,
3628                           "tx: submit urb err %d\n", ret);
3629                 break;
3630         }
3631
3632         spin_unlock_irqrestore(&dev->txq.lock, flags);
3633
3634         if (ret) {
3635                 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3636 drop:
3637                 dev->net->stats.tx_dropped++;
3638                 if (skb)
3639                         dev_kfree_skb_any(skb);
3640                 usb_free_urb(urb);
3641         } else {
3642                 netif_dbg(dev, tx_queued, dev->net,
3643                           "> tx, len %d, type 0x%x\n", length, skb->protocol);
3644         }
3645 }
3646
3647 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3648 {
3649         struct urb *urb;
3650         int i;
3651
3652         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3653                 for (i = 0; i < 10; i++) {
3654                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3655                                 break;
3656                         urb = usb_alloc_urb(0, GFP_ATOMIC);
3657                         if (urb)
3658                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3659                                         return;
3660                 }
3661
3662                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3663                         tasklet_schedule(&dev->bh);
3664         }
3665         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3666                 netif_wake_queue(dev->net);
3667 }
3668
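/* Tasklet body: drain dev->done, handing received buffers to rx_process()
 * and freeing completed TX URBs, then rearm the statistics timer, kick any
 * pending transmits and keep the pool of in-flight RX URBs topped up.
 */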
3669 static void lan78xx_bh(unsigned long param)
3670 {
3671         struct lan78xx_net *dev = (struct lan78xx_net *)param;
3672         struct sk_buff *skb;
3673         struct skb_data *entry;
3674
3675         while ((skb = skb_dequeue(&dev->done))) {
3676                 entry = (struct skb_data *)(skb->cb);
3677                 switch (entry->state) {
3678                 case rx_done:
3679                         entry->state = rx_cleanup;
3680                         rx_process(dev, skb);
3681                         continue;
3682                 case tx_done:
3683                         usb_free_urb(entry->urb);
3684                         dev_kfree_skb(skb);
3685                         continue;
3686                 case rx_cleanup:
3687                         usb_free_urb(entry->urb);
3688                         dev_kfree_skb(skb);
3689                         continue;
3690                 default:
3691                         netdev_dbg(dev->net, "skb state %d\n", entry->state);
3692                         return;
3693                 }
3694         }
3695
3696         if (netif_device_present(dev->net) && netif_running(dev->net)) {
3697                 /* reset update timer delta */
3698                 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3699                         dev->delta = 1;
3700                         mod_timer(&dev->stat_monitor,
3701                                   jiffies + STAT_UPDATE_TIMER);
3702                 }
3703
3704                 if (!skb_queue_empty(&dev->txq_pend))
3705                         lan78xx_tx_bh(dev);
3706
3707                 if (!timer_pending(&dev->delay) &&
3708                     !test_bit(EVENT_RX_HALT, &dev->flags))
3709                         lan78xx_rx_bh(dev);
3710         }
3711 }
3712
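/* Deferred kevent work: clear TX/RX endpoint halts (after unlinking the
 * affected URBs), run a link reset when requested and refresh the hardware
 * statistics, backing the statistics timer off exponentially.
 */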
3713 static void lan78xx_delayedwork(struct work_struct *work)
3714 {
3715         int status;
3716         struct lan78xx_net *dev;
3717
3718         dev = container_of(work, struct lan78xx_net, wq.work);
3719
3720         if (usb_autopm_get_interface(dev->intf) < 0)
3721                 return;
3722
3723         if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3724                 unlink_urbs(dev, &dev->txq);
3725
3726                 status = usb_clear_halt(dev->udev, dev->pipe_out);
3727                 if (status < 0 &&
3728                     status != -EPIPE &&
3729                     status != -ESHUTDOWN) {
3730                         if (netif_msg_tx_err(dev))
3731                                 netdev_err(dev->net,
3732                                            "can't clear tx halt, status %d\n",
3733                                            status);
3734                 } else {
3735                         clear_bit(EVENT_TX_HALT, &dev->flags);
3736                         if (status != -ESHUTDOWN)
3737                                 netif_wake_queue(dev->net);
3738                 }
3739         }
3740
3741         if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3742                 unlink_urbs(dev, &dev->rxq);
3743                 status = usb_clear_halt(dev->udev, dev->pipe_in);
3744                 if (status < 0 &&
3745                     status != -EPIPE &&
3746                     status != -ESHUTDOWN) {
3747                         if (netif_msg_rx_err(dev))
3748                                 netdev_err(dev->net,
3749                                            "can't clear rx halt, status %d\n",
3750                                            status);
3751                 } else {
3752                         clear_bit(EVENT_RX_HALT, &dev->flags);
3753                         tasklet_schedule(&dev->bh);
3754                 }
3755         }
3756
3757         if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3758                 int ret;
3759
3760                 clear_bit(EVENT_LINK_RESET, &dev->flags);
3761                 ret = lan78xx_link_reset(dev);
3762                 if (ret < 0)
3763                         netdev_info(dev->net, "link reset failed (%d)\n",
3764                                     ret);
3765         }
3766
3767         if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3768                 lan78xx_update_stats(dev);
3769
3770                 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3771
3772                 mod_timer(&dev->stat_monitor,
3773                           jiffies + (STAT_UPDATE_TIMER * dev->delta));
3774
3775                 dev->delta = min((dev->delta * 2), 50);
3776         }
3777
3778         usb_autopm_put_interface(dev->intf);
3779 }
3780
3781 static void intr_complete(struct urb *urb)
3782 {
3783         struct lan78xx_net *dev = urb->context;
3784         int status = urb->status;
3785
3786         switch (status) {
3787         /* success */
3788         case 0:
3789                 lan78xx_status(dev, urb);
3790                 break;
3791
3792         /* software-driven interface shutdown */
3793         case -ENOENT:                   /* urb killed */
3794         case -ESHUTDOWN:                /* hardware gone */
3795                 netif_dbg(dev, ifdown, dev->net,
3796                           "intr shutdown, code %d\n", status);
3797                 return;
3798
3799         /* NOTE:  not throttling like RX/TX, since this endpoint
3800          * already polls infrequently
3801          */
3802         default:
3803                 netdev_dbg(dev->net, "intr status %d\n", status);
3804                 break;
3805         }
3806
3807         if (!netif_running(dev->net))
3808                 return;
3809
3810         memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3811         status = usb_submit_urb(urb, GFP_ATOMIC);
3812         if (status != 0)
3813                 netif_err(dev, timer, dev->net,
3814                           "intr resubmit --> %d\n", status);
3815 }
3816
3817 static void lan78xx_disconnect(struct usb_interface *intf)
3818 {
3819         struct lan78xx_net *dev;
3820         struct usb_device *udev;
3821         struct net_device *net;
3822         struct phy_device *phydev;
3823
3824         dev = usb_get_intfdata(intf);
3825         usb_set_intfdata(intf, NULL);
3826         if (!dev)
3827                 return;
3828
3829         udev = interface_to_usbdev(intf);
3830         net = dev->net;
3831         phydev = net->phydev;
3832
3833         phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3834         phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3835
3836         phy_disconnect(net->phydev);
3837
3838         if (phy_is_pseudo_fixed_link(phydev))
3839                 fixed_phy_unregister(phydev);
3840
3841         unregister_netdev(net);
3842
3843         cancel_delayed_work_sync(&dev->wq);
3844
3845         usb_scuttle_anchored_urbs(&dev->deferred);
3846
3847         lan78xx_unbind(dev, intf);
3848
3849         usb_kill_urb(dev->urb_intr);
3850         usb_free_urb(dev->urb_intr);
3851
3852         free_netdev(net);
3853         usb_put_dev(udev);
3854 }
3855
3856 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
3857 {
3858         struct lan78xx_net *dev = netdev_priv(net);
3859
3860         unlink_urbs(dev, &dev->txq);
3861         tasklet_schedule(&dev->bh);
3862 }
3863
3864 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3865                                                 struct net_device *netdev,
3866                                                 netdev_features_t features)
3867 {
3868         if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3869                 features &= ~NETIF_F_GSO_MASK;
3870
3871         features = vlan_features_check(skb, features);
3872         features = vxlan_features_check(skb, features);
3873
3874         return features;
3875 }
3876
3877 static const struct net_device_ops lan78xx_netdev_ops = {
3878         .ndo_open               = lan78xx_open,
3879         .ndo_stop               = lan78xx_stop,
3880         .ndo_start_xmit         = lan78xx_start_xmit,
3881         .ndo_tx_timeout         = lan78xx_tx_timeout,
3882         .ndo_change_mtu         = lan78xx_change_mtu,
3883         .ndo_set_mac_address    = lan78xx_set_mac_addr,
3884         .ndo_validate_addr      = eth_validate_addr,
3885         .ndo_do_ioctl           = phy_do_ioctl_running,
3886         .ndo_set_rx_mode        = lan78xx_set_multicast,
3887         .ndo_set_features       = lan78xx_set_features,
3888         .ndo_vlan_rx_add_vid    = lan78xx_vlan_rx_add_vid,
3889         .ndo_vlan_rx_kill_vid   = lan78xx_vlan_rx_kill_vid,
3890         .ndo_features_check     = lan78xx_features_check,
3891 };
3892
3893 static void lan78xx_stat_monitor(struct timer_list *t)
3894 {
3895         struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3896
3897         lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3898 }
3899
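/* USB probe: allocate and initialise the netdev, validate the expected
 * bulk-in, bulk-out and interrupt-in endpoints, bind the hardware, set up
 * the interrupt URB and PHY, then register the network device.
 */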
3900 static int lan78xx_probe(struct usb_interface *intf,
3901                          const struct usb_device_id *id)
3902 {
3903         struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
3904         struct lan78xx_net *dev;
3905         struct net_device *netdev;
3906         struct usb_device *udev;
3907         int ret;
3908         unsigned int maxp;
3909         unsigned int period;
3910         u8 *buf = NULL;
3911
3912         udev = interface_to_usbdev(intf);
3913         udev = usb_get_dev(udev);
3914
3915         netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3916         if (!netdev) {
3917                 dev_err(&intf->dev, "Error: OOM\n");
3918                 ret = -ENOMEM;
3919                 goto out1;
3920         }
3921
3922         /* netdev_printk() needs this */
3923         SET_NETDEV_DEV(netdev, &intf->dev);
3924
3925         dev = netdev_priv(netdev);
3926         dev->udev = udev;
3927         dev->intf = intf;
3928         dev->net = netdev;
3929         dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3930                                         | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3931
3932         skb_queue_head_init(&dev->rxq);
3933         skb_queue_head_init(&dev->txq);
3934         skb_queue_head_init(&dev->done);
3935         skb_queue_head_init(&dev->rxq_pause);
3936         skb_queue_head_init(&dev->txq_pend);
3937         mutex_init(&dev->phy_mutex);
3938         mutex_init(&dev->dev_mutex);
3939
3940         tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3941         INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3942         init_usb_anchor(&dev->deferred);
3943
3944         netdev->netdev_ops = &lan78xx_netdev_ops;
3945         netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3946         netdev->ethtool_ops = &lan78xx_ethtool_ops;
3947
3948         dev->delta = 1;
3949         timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3950
3951         mutex_init(&dev->stats.access_lock);
3952
3953         if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
3954                 ret = -ENODEV;
3955                 goto out2;
3956         }
3957
3958         dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3959         ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
3960         if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
3961                 ret = -ENODEV;
3962                 goto out2;
3963         }
3964
3965         dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3966         ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
3967         if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
3968                 ret = -ENODEV;
3969                 goto out2;
3970         }
3971
3972         ep_intr = &intf->cur_altsetting->endpoint[2];
3973         if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
3974                 ret = -ENODEV;
3975                 goto out2;
3976         }
3977
3978         dev->pipe_intr = usb_rcvintpipe(dev->udev,
3979                                         usb_endpoint_num(&ep_intr->desc));
3980
3981         ret = lan78xx_bind(dev, intf);
3982         if (ret < 0)
3983                 goto out2;
3984
3985         if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3986                 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3987
3988         /* MTU range: 68 - 9000 */
3989         netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3990         netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
3991
3992         period = ep_intr->desc.bInterval;
3993         maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3994         buf = kmalloc(maxp, GFP_KERNEL);
3995         if (buf) {
3996                 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3997                 if (!dev->urb_intr) {
3998                         ret = -ENOMEM;
3999                         kfree(buf);
4000                         goto out3;
4001                 } else {
4002                         usb_fill_int_urb(dev->urb_intr, dev->udev,
4003                                          dev->pipe_intr, buf, maxp,
4004                                          intr_complete, dev, period);
4005                         dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
4006                 }
4007         }
4008
4009         dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
4010
4011         /* Reject broken descriptors. */
4012         if (dev->maxpacket == 0) {
4013                 ret = -ENODEV;
4014                 goto out4;
4015         }
4016
4017         /* driver requires remote-wakeup capability during autosuspend. */
4018         intf->needs_remote_wakeup = 1;
4019
4020         ret = lan78xx_phy_init(dev);
4021         if (ret < 0)
4022                 goto out4;
4023
4024         ret = register_netdev(netdev);
4025         if (ret != 0) {
4026                 netif_err(dev, probe, netdev, "couldn't register the device\n");
4027                 goto out5;
4028         }
4029
4030         usb_set_intfdata(intf, dev);
4031
4032         ret = device_set_wakeup_enable(&udev->dev, true);
4033
4034         /* The 2 second default autosuspend delay has more overhead than
4035          * benefit, so use a 10 second delay instead.
4036          */
4037         pm_runtime_set_autosuspend_delay(&udev->dev,
4038                                          DEFAULT_AUTOSUSPEND_DELAY);
4039
4040         return 0;
4041
4042 out5:
4043         phy_disconnect(netdev->phydev);
4044 out4:
4045         usb_free_urb(dev->urb_intr);
4046 out3:
4047         lan78xx_unbind(dev, intf);
4048 out2:
4049         free_netdev(netdev);
4050 out1:
4051         usb_put_dev(udev);
4052
4053         return ret;
4054 }
4055
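/* Bit-wise CRC-16 (polynomial 0x8005, data LSB first, seed 0xFFFF) over a
 * wake-up frame pattern, as programmed into the WUF_CFGx filter registers.
 */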
4056 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4057 {
4058         const u16 crc16poly = 0x8005;
4059         int i;
4060         u16 bit, crc, msb;
4061         u8 data;
4062
4063         crc = 0xFFFF;
4064         for (i = 0; i < len; i++) {
4065                 data = *buf++;
4066                 for (bit = 0; bit < 8; bit++) {
4067                         msb = crc >> 15;
4068                         crc <<= 1;
4069
4070                         if (msb ^ (u16)(data & 1)) {
4071                                 crc ^= crc16poly;
4072                                 crc |= (u16)0x0001U;
4073                         }
4074                         data >>= 1;
4075                 }
4076         }
4077
4078         return crc;
4079 }
4080
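/* Prepare the chip for USB autosuspend: stop the TX and RX paths, arm
 * wake-up on received frames (WUCSR_RFE_WAKE_EN_) and PHY events, select
 * PMT_CTL suspend mode 3, then restart the RX path so wake frames are seen.
 */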
4081 static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
4082 {
4083         u32 buf;
4084         int ret;
4085
4086         ret = lan78xx_stop_tx_path(dev);
4087         if (ret < 0)
4088                 return ret;
4089
4090         ret = lan78xx_stop_rx_path(dev);
4091         if (ret < 0)
4092                 return ret;
4093
4094         /* auto suspend (selective suspend) */
4095
4096         ret = lan78xx_write_reg(dev, WUCSR, 0);
4097         if (ret < 0)
4098                 return ret;
4099         ret = lan78xx_write_reg(dev, WUCSR2, 0);
4100         if (ret < 0)
4101                 return ret;
4102         ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4103         if (ret < 0)
4104                 return ret;
4105
4106         /* set goodframe wakeup */
4107
4108         ret = lan78xx_read_reg(dev, WUCSR, &buf);
4109         if (ret < 0)
4110                 return ret;
4111
4112         buf |= WUCSR_RFE_WAKE_EN_;
4113         buf |= WUCSR_STORE_WAKE_;
4114
4115         ret = lan78xx_write_reg(dev, WUCSR, buf);
4116         if (ret < 0)
4117                 return ret;
4118
4119         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4120         if (ret < 0)
4121                 return ret;
4122
4123         buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4124         buf |= PMT_CTL_RES_CLR_WKP_STS_;
4125         buf |= PMT_CTL_PHY_WAKE_EN_;
4126         buf |= PMT_CTL_WOL_EN_;
4127         buf &= ~PMT_CTL_SUS_MODE_MASK_;
4128         buf |= PMT_CTL_SUS_MODE_3_;
4129
4130         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4131         if (ret < 0)
4132                 return ret;
4133
4134         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4135         if (ret < 0)
4136                 return ret;
4137
4138         buf |= PMT_CTL_WUPS_MASK_;
4139
4140         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4141         if (ret < 0)
4142                 return ret;
4143
4144         ret = lan78xx_start_rx_path(dev);
4145
4146         return ret;
4147 }
4148
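/* Program the wake-up logic for suspend according to the requested WoL
 * flags: build WUF_CFG/WUF_MASK filters for multicast and ARP wake, set
 * the magic/broadcast/unicast wake bits in WUCSR, pick a matching PMT_CTL
 * suspend mode and restart the RX path.
 */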
4149 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
4150 {
4151         const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
4152         const u8 ipv6_multicast[3] = { 0x33, 0x33 };
4153         const u8 arp_type[2] = { 0x08, 0x06 };
4154         u32 temp_pmt_ctl;
4155         int mask_index;
4156         u32 temp_wucsr;
4157         u32 buf;
4158         u16 crc;
4159         int ret;
4160
4161         ret = lan78xx_stop_tx_path(dev);
4162         if (ret < 0)
4163                 return ret;
4164         ret = lan78xx_stop_rx_path(dev);
4165         if (ret < 0)
4166                 return ret;
4167
4168         ret = lan78xx_write_reg(dev, WUCSR, 0);
4169         if (ret < 0)
4170                 return ret;
4171         ret = lan78xx_write_reg(dev, WUCSR2, 0);
4172         if (ret < 0)
4173                 return ret;
4174         ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4175         if (ret < 0)
4176                 return ret;
4177
4178         temp_wucsr = 0;
4179
4180         temp_pmt_ctl = 0;
4181
4182         ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
4183         if (ret < 0)
4184                 return ret;
4185
4186         temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
4187         temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
4188
4189         for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
4190                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
4191                 if (ret < 0)
4192                         return ret;
4193         }
4194
4195         mask_index = 0;
4196         if (wol & WAKE_PHY) {
4197                 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
4198
4199                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4200                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4201                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4202         }
4203         if (wol & WAKE_MAGIC) {
4204                 temp_wucsr |= WUCSR_MPEN_;
4205
4206                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4207                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4208                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
4209         }
4210         if (wol & WAKE_BCAST) {
4211                 temp_wucsr |= WUCSR_BCST_EN_;
4212
4213                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4214                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4215                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4216         }
4217         if (wol & WAKE_MCAST) {
4218                 temp_wucsr |= WUCSR_WAKE_EN_;
4219
4220                 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
4221                 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
4222                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4223                                         WUF_CFGX_EN_ |
4224                                         WUF_CFGX_TYPE_MCAST_ |
4225                                         (0 << WUF_CFGX_OFFSET_SHIFT_) |
4226                                         (crc & WUF_CFGX_CRC16_MASK_));
4227                 if (ret < 0)
4228                         return ret;
4229
4230                 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
4231                 if (ret < 0)
4232                         return ret;
4233                 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4234                 if (ret < 0)
4235                         return ret;
4236                 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4237                 if (ret < 0)
4238                         return ret;
4239                 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4240                 if (ret < 0)
4241                         return ret;
4242
4243                 mask_index++;
4244
4245                 /* for IPv6 Multicast */
4246                 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
4247                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4248                                         WUF_CFGX_EN_ |
4249                                         WUF_CFGX_TYPE_MCAST_ |
4250                                         (0 << WUF_CFGX_OFFSET_SHIFT_) |
4251                                         (crc & WUF_CFGX_CRC16_MASK_));
4252                 if (ret < 0)
4253                         return ret;
4254
4255                 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
4256                 if (ret < 0)
4257                         return ret;
4258                 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4259                 if (ret < 0)
4260                         return ret;
4261                 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4262                 if (ret < 0)
4263                         return ret;
4264                 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4265                 if (ret < 0)
4266                         return ret;
4267
4268                 mask_index++;
4269
4270                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4271                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4272                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4273         }
4274         if (wol & WAKE_UCAST) {
4275                 temp_wucsr |= WUCSR_PFDA_EN_;
4276
4277                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4278                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4279                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4280         }
4281         if (wol & WAKE_ARP) {
4282                 temp_wucsr |= WUCSR_WAKE_EN_;
4283
4284                 /* set WUF_CFG & WUF_MASK
4285                  * for packettype (offset 12,13) = ARP (0x0806)
4286                  */
4287                 crc = lan78xx_wakeframe_crc16(arp_type, 2);
4288                 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4289                                         WUF_CFGX_EN_ |
4290                                         WUF_CFGX_TYPE_ALL_ |
4291                                         (0 << WUF_CFGX_OFFSET_SHIFT_) |
4292                                         (crc & WUF_CFGX_CRC16_MASK_));
4293                 if (ret < 0)
4294                         return ret;
4295
4296                 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
4297                 if (ret < 0)
4298                         return ret;
4299                 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4300                 if (ret < 0)
4301                         return ret;
4302                 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4303                 if (ret < 0)
4304                         return ret;
4305                 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4306                 if (ret < 0)
4307                         return ret;
4308
4309                 mask_index++;
4310
4311                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4312                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4313                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4314         }
4315
4316         ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
4317         if (ret < 0)
4318                 return ret;
4319
4320         /* when multiple WOL bits are set */
4321         if (hweight_long((unsigned long)wol) > 1) {
4322                 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4323                 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4324                 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4325         }
4326         ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
4327         if (ret < 0)
4328                 return ret;
4329
4330         /* clear WUPS */
4331         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4332         if (ret < 0)
4333                 return ret;
4334
4335         buf |= PMT_CTL_WUPS_MASK_;
4336
4337         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4338         if (ret < 0)
4339                 return ret;
4340
4341         ret = lan78xx_start_rx_path(dev);
4342
4343         return ret;
4344 }
4345
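/* USB suspend callback: refuse autosuspend while TX work is pending,
 * otherwise quiesce the data paths and arm either the autosuspend wake
 * sources or the configured WoL filters; with the interface down, all
 * wake-up sources are disabled.
 */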
4346 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
4347 {
4348         struct lan78xx_net *dev = usb_get_intfdata(intf);
4349         bool dev_open;
4350         int ret;
4351
4352         mutex_lock(&dev->dev_mutex);
4353
4354         netif_dbg(dev, ifdown, dev->net,
4355                   "suspending: pm event %#x", message.event);
4356
4357         dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4358
4359         if (dev_open) {
4360                 spin_lock_irq(&dev->txq.lock);
4361                 /* don't autosuspend while transmitting */
4362                 if ((skb_queue_len(&dev->txq) ||
4363                      skb_queue_len(&dev->txq_pend)) &&
4364                     PMSG_IS_AUTO(message)) {
4365                         spin_unlock_irq(&dev->txq.lock);
4366                         ret = -EBUSY;
4367                         goto out;
4368                 } else {
4369                         set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4370                         spin_unlock_irq(&dev->txq.lock);
4371                 }
4372
4373                 /* stop RX */
4374                 ret = lan78xx_stop_rx_path(dev);
4375                 if (ret < 0)
4376                         goto out;
4377
4378                 ret = lan78xx_flush_rx_fifo(dev);
4379                 if (ret < 0)
4380                         goto out;
4381
4382                 /* stop Tx */
4383                 ret = lan78xx_stop_tx_path(dev);
4384                 if (ret < 0)
4385                         goto out;
4386
4387                 /* empty out the Rx and Tx queues */
4388                 netif_device_detach(dev->net);
4389                 lan78xx_terminate_urbs(dev);
4390                 usb_kill_urb(dev->urb_intr);
4391
4392                 /* reattach */
4393                 netif_device_attach(dev->net);
4394
4395                 del_timer(&dev->stat_monitor);
4396
4397                 if (PMSG_IS_AUTO(message)) {
4398                         ret = lan78xx_set_auto_suspend(dev);
4399                         if (ret < 0)
4400                                 goto out;
4401                 } else {
4402                         struct lan78xx_priv *pdata;
4403
4404                         pdata = (struct lan78xx_priv *)(dev->data[0]);
4405                         netif_carrier_off(dev->net);
4406                         ret = lan78xx_set_suspend(dev, pdata->wol);
4407                         if (ret < 0)
4408                                 goto out;
4409                 }
4410         } else {
4411                 /* Interface is down; don't allow WOL and PHY
4412                  * events to wake up the host
4413                  */
4414                 u32 buf;
4415
4416                 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4417
4418                 ret = lan78xx_write_reg(dev, WUCSR, 0);
4419                 if (ret < 0)
4420                         goto out;
4421                 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4422                 if (ret < 0)
4423                         goto out;
4424
4425                 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4426                 if (ret < 0)
4427                         goto out;
4428
4429                 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4430                 buf |= PMT_CTL_RES_CLR_WKP_STS_;
4431                 buf &= ~PMT_CTL_SUS_MODE_MASK_;
4432                 buf |= PMT_CTL_SUS_MODE_3_;
4433
4434                 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4435                 if (ret < 0)
4436                         goto out;
4437
4438                 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4439                 if (ret < 0)
4440                         goto out;
4441
4442                 buf |= PMT_CTL_WUPS_MASK_;
4443
4444                 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4445                 if (ret < 0)
4446                         goto out;
4447         }
4448
4449         ret = 0;
4450 out:
4451         mutex_unlock(&dev->dev_mutex);
4452
4453         return ret;
4454 }
4455
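/* Resubmit TX URBs that were anchored on dev->deferred while the device
 * was asleep.  Returns true if the bulk-out endpoint reported a halt
 * (-EPIPE) so the caller can schedule EVENT_TX_HALT handling.
 */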
4456 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
4457 {
4458         bool pipe_halted = false;
4459         struct urb *urb;
4460
4461         while ((urb = usb_get_from_anchor(&dev->deferred))) {
4462                 struct sk_buff *skb = urb->context;
4463                 int ret;
4464
4465                 if (!netif_device_present(dev->net) ||
4466                     !netif_carrier_ok(dev->net) ||
4467                     pipe_halted) {
4468                         usb_free_urb(urb);
4469                         dev_kfree_skb(skb);
4470                         continue;
4471                 }
4472
4473                 ret = usb_submit_urb(urb, GFP_ATOMIC);
4474
4475                 if (ret == 0) {
4476                         netif_trans_update(dev->net);
4477                         lan78xx_queue_skb(&dev->txq, skb, tx_start);
4478                 } else {
4479                         usb_free_urb(urb);
4480                         dev_kfree_skb(skb);
4481
4482                         if (ret == -EPIPE) {
4483                                 netif_stop_queue(dev->net);
4484                                 pipe_halted = true;
4485                         } else if (ret == -ENODEV) {
4486                                 netif_device_detach(dev->net);
4487                         }
4488                 }
4489         }
4490
4491         return pipe_halted;
4492 }
4493
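/* USB resume callback: restart the interrupt URB, resubmit any TX URBs
 * deferred while asleep, restart the TX path and statistics timer, then
 * reset the WUCSR/WUCSR2/WK_SRC wake-up registers.
 */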
4494 static int lan78xx_resume(struct usb_interface *intf)
4495 {
4496         struct lan78xx_net *dev = usb_get_intfdata(intf);
4497         bool dev_open;
4498         int ret;
4499
4500         mutex_lock(&dev->dev_mutex);
4501
4502         netif_dbg(dev, ifup, dev->net, "resuming device");
4503
4504         dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4505
4506         if (dev_open) {
4507                 bool pipe_halted = false;
4508
4509                 ret = lan78xx_flush_tx_fifo(dev);
4510                 if (ret < 0)
4511                         goto out;
4512
4513                 if (dev->urb_intr) {
4514                         int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
4515
4516                         if (ret < 0) {
4517                                 if (ret == -ENODEV)
4518                                         netif_device_detach(dev->net);
4519
4520                                 netdev_warn(dev->net, "Failed to submit intr URB");
4521                         }
4522                 }
4523
4524                 spin_lock_irq(&dev->txq.lock);
4525
4526                 if (netif_device_present(dev->net)) {
4527                         pipe_halted = lan78xx_submit_deferred_urbs(dev);
4528
4529                         if (pipe_halted)
4530                                 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
4531                 }
4532
4533                 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4534
4535                 spin_unlock_irq(&dev->txq.lock);
4536
4537                 if (!pipe_halted &&
4538                     netif_device_present(dev->net) &&
4539                     (skb_queue_len(&dev->txq) < dev->tx_qlen))
4540                         netif_start_queue(dev->net);
4541
4542                 ret = lan78xx_start_tx_path(dev);
4543                 if (ret < 0)
4544                         goto out;
4545
4546                 tasklet_schedule(&dev->bh);
4547
4548                 if (!timer_pending(&dev->stat_monitor)) {
4549                         dev->delta = 1;
4550                         mod_timer(&dev->stat_monitor,
4551                                   jiffies + STAT_UPDATE_TIMER);
4552                 }
4553
4554         } else {
4555                 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4556         }
4557
4558         ret = lan78xx_write_reg(dev, WUCSR2, 0);
4559         if (ret < 0)
4560                 goto out;
4561         ret = lan78xx_write_reg(dev, WUCSR, 0);
4562         if (ret < 0)
4563                 goto out;
4564         ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4565         if (ret < 0)
4566                 goto out;
4567
4568         ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4569                                              WUCSR2_ARP_RCD_ |
4570                                              WUCSR2_IPV6_TCPSYN_RCD_ |
4571                                              WUCSR2_IPV4_TCPSYN_RCD_);
4572         if (ret < 0)
4573                 goto out;
4574
4575         ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
4576                                             WUCSR_EEE_RX_WAKE_ |
4577                                             WUCSR_PFDA_FR_ |
4578                                             WUCSR_RFE_WAKE_FR_ |
4579                                             WUCSR_WUFR_ |
4580                                             WUCSR_MPR_ |
4581                                             WUCSR_BCST_FR_);
4582         if (ret < 0)
4583                 goto out;
4584
4585         ret = 0;
4586 out:
4587         mutex_unlock(&dev->dev_mutex);
4588
4589         return ret;
4590 }
4591
4592 static int lan78xx_reset_resume(struct usb_interface *intf)
4593 {
4594         struct lan78xx_net *dev = usb_get_intfdata(intf);
4595         int ret;
4596
4597         netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
4598
4599         ret = lan78xx_reset(dev);
4600         if (ret < 0)
4601                 return ret;
4602
4603         phy_start(dev->net->phydev);
4604
4605         ret = lan78xx_resume(intf);
4606
4607         return ret;
4608 }
4609
4610 static const struct usb_device_id products[] = {
4611         {
4612         /* LAN7800 USB Gigabit Ethernet Device */
4613         USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4614         },
4615         {
4616         /* LAN7850 USB Gigabit Ethernet Device */
4617         USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4618         },
4619         {
4620         /* LAN7801 USB Gigabit Ethernet Device */
4621         USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
4622         },
4623         {
4624         /* ATM2-AF USB Gigabit Ethernet Device */
4625         USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
4626         },
4627         {},
4628 };
4629 MODULE_DEVICE_TABLE(usb, products);
4630
4631 static struct usb_driver lan78xx_driver = {
4632         .name                   = DRIVER_NAME,
4633         .id_table               = products,
4634         .probe                  = lan78xx_probe,
4635         .disconnect             = lan78xx_disconnect,
4636         .suspend                = lan78xx_suspend,
4637         .resume                 = lan78xx_resume,
4638         .reset_resume           = lan78xx_reset_resume,
4639         .supports_autosuspend   = 1,
4640         .disable_hub_initiated_lpm = 1,
4641 };
4642
4643 module_usb_driver(lan78xx_driver);
4644
4645 MODULE_AUTHOR(DRIVER_AUTHOR);
4646 MODULE_DESCRIPTION(DRIVER_DESC);
4647 MODULE_LICENSE("GPL");