GNU Linux-libre 4.14.259-gnu1
[releases.git] / drivers / net / usb / lan78xx.c
1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <linux/phy.h>
33 #include <net/ip6_checksum.h>
34 #include <net/vxlan.h>
35 #include <linux/interrupt.h>
36 #include <linux/irqdomain.h>
37 #include <linux/irq.h>
38 #include <linux/irqchip/chained_irq.h>
39 #include <linux/microchipphy.h>
40 #include <linux/phy.h>
41 #include <linux/of_net.h>
42 #include "lan78xx.h"
43
44 #define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
45 #define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
46 #define DRIVER_NAME     "lan78xx"
47 #define DRIVER_VERSION  "1.0.6"
48
49 #define TX_TIMEOUT_JIFFIES              (5 * HZ)
50 #define THROTTLE_JIFFIES                (HZ / 8)
51 #define UNLINK_TIMEOUT_MS               3
52
53 #define RX_MAX_QUEUE_MEMORY             (60 * 1518)
54
55 #define SS_USB_PKT_SIZE                 (1024)
56 #define HS_USB_PKT_SIZE                 (512)
57 #define FS_USB_PKT_SIZE                 (64)
58
59 #define MAX_RX_FIFO_SIZE                (12 * 1024)
60 #define MAX_TX_FIFO_SIZE                (12 * 1024)
61 #define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
62 #define DEFAULT_BULK_IN_DELAY           (0x0800)
63 #define MAX_SINGLE_PACKET_SIZE          (9000)
64 #define DEFAULT_TX_CSUM_ENABLE          (true)
65 #define DEFAULT_RX_CSUM_ENABLE          (true)
66 #define DEFAULT_TSO_CSUM_ENABLE         (true)
67 #define DEFAULT_VLAN_FILTER_ENABLE      (true)
68 #define TX_OVERHEAD                     (8)
69 #define RXW_PADDING                     2
70
71 #define LAN78XX_USB_VENDOR_ID           (0x0424)
72 #define LAN7800_USB_PRODUCT_ID          (0x7800)
73 #define LAN7850_USB_PRODUCT_ID          (0x7850)
74 #define LAN7801_USB_PRODUCT_ID          (0x7801)
75 #define LAN78XX_EEPROM_MAGIC            (0x78A5)
76 #define LAN78XX_OTP_MAGIC               (0x78F3)
77
78 #define MII_READ                        1
79 #define MII_WRITE                       0
80
81 #define EEPROM_INDICATOR                (0xA5)
82 #define EEPROM_MAC_OFFSET               (0x01)
83 #define MAX_EEPROM_SIZE                 512
84 #define OTP_INDICATOR_1                 (0xF3)
85 #define OTP_INDICATOR_2                 (0xF7)
86
87 #define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
88                                          WAKE_MCAST | WAKE_BCAST | \
89                                          WAKE_ARP | WAKE_MAGIC)
90
91 /* USB related defines */
92 #define BULK_IN_PIPE                    1
93 #define BULK_OUT_PIPE                   2
94
95 /* default autosuspend delay (mSec)*/
96 #define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
97
98 /* statistic update interval (mSec) */
99 #define STAT_UPDATE_TIMER               (1 * 1000)
100
101 /* defines interrupts from interrupt EP */
102 #define MAX_INT_EP                      (32)
103 #define INT_EP_INTEP                    (31)
104 #define INT_EP_OTP_WR_DONE              (28)
105 #define INT_EP_EEE_TX_LPI_START         (26)
106 #define INT_EP_EEE_TX_LPI_STOP          (25)
107 #define INT_EP_EEE_RX_LPI               (24)
108 #define INT_EP_MAC_RESET_TIMEOUT        (23)
109 #define INT_EP_RDFO                     (22)
110 #define INT_EP_TXE                      (21)
111 #define INT_EP_USB_STATUS               (20)
112 #define INT_EP_TX_DIS                   (19)
113 #define INT_EP_RX_DIS                   (18)
114 #define INT_EP_PHY                      (17)
115 #define INT_EP_DP                       (16)
116 #define INT_EP_MAC_ERR                  (15)
117 #define INT_EP_TDFU                     (14)
118 #define INT_EP_TDFO                     (13)
119 #define INT_EP_UTX                      (12)
120 #define INT_EP_GPIO_11                  (11)
121 #define INT_EP_GPIO_10                  (10)
122 #define INT_EP_GPIO_9                   (9)
123 #define INT_EP_GPIO_8                   (8)
124 #define INT_EP_GPIO_7                   (7)
125 #define INT_EP_GPIO_6                   (6)
126 #define INT_EP_GPIO_5                   (5)
127 #define INT_EP_GPIO_4                   (4)
128 #define INT_EP_GPIO_3                   (3)
129 #define INT_EP_GPIO_2                   (2)
130 #define INT_EP_GPIO_1                   (1)
131 #define INT_EP_GPIO_0                   (0)
132
/* ethtool statistic names; order must match the field order of
 * struct lan78xx_statstage (and lan78xx_statstage64), since the stats
 * structs are copied out as flat arrays.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
182
/* Raw 32-bit hardware statistics counters, exactly as transferred by the
 * USB_VENDOR_REQUEST_GET_STATS control request (little-endian on the wire;
 * lan78xx_read_stats() byte-swaps them as a flat u32 array).  Field order
 * must stay in sync with lan78xx_gstrings[] and lan78xx_statstage64.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
232
/* 64-bit accumulated statistics (struct statstage::curr_stat).  Same fields
 * in the same order as lan78xx_statstage; lan78xx_update_stats() walks both
 * structs as flat arrays, so the layouts must never diverge.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
282
struct lan78xx_net;

/* Driver-private state hung off lan78xx_net::driver_priv: receive filter
 * (RFE) configuration, filter tables, and the deferred work used to push
 * multicast/VLAN table updates to the hardware.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;			/* cached RFE_CTL register value */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;			/* Wake-on-LAN flags */
};
297
/* Lifecycle state of an skb queued on a USB URB (stored in skb->cb via
 * struct skb_data below).
 */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
307
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB carrying this skb */
	struct lan78xx_net *dev;
	enum skb_state state;	/* where the skb is in its tx/rx lifecycle */
	size_t length;
	int num_of_packet;	/* NOTE(review): presumably frames per urb — confirm at fill site */
};
315
/* Pairs a USB control request with the device it targets. */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
320
321 #define EVENT_TX_HALT                   0
322 #define EVENT_RX_HALT                   1
323 #define EVENT_RX_MEMORY                 2
324 #define EVENT_STS_SPLIT                 3
325 #define EVENT_LINK_RESET                4
326 #define EVENT_RX_PAUSED                 5
327 #define EVENT_DEV_WAKING                6
328 #define EVENT_DEV_ASLEEP                7
329 #define EVENT_DEV_OPEN                  8
330 #define EVENT_STAT_UPDATE               9
331
/* Aggregate statistics state: the last raw hardware snapshot, per-counter
 * rollover bookkeeping, and the 64-bit extended totals computed by
 * lan78xx_update_stats().
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;		/* last raw HW snapshot */
	struct lan78xx_statstage	rollover_count;	/* wraps seen per counter */
	struct lan78xx_statstage	rollover_max;	/* per-counter max before wrap */
	struct lan78xx_statstage64	curr_stat;	/* 64-bit extended totals */
};
339
/* NOTE(review): appears to back a driver-private IRQ domain for the chip's
 * interrupt sources, with phyirq the Linux IRQ mapped for the PHY — confirm
 * against the domain setup code (not in this chunk).
 */
struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;	/* cached interrupt-enable mask */
	struct mutex		irq_lock;		/* for irq bus access */
};
348
/* Per-device state for one LAN78xx USB network adapter. */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;	/* struct lan78xx_priv */

	int			rx_qlen;
	int			tx_qlen;
	struct sk_buff_head	rxq;		/* rx urbs in flight */
	struct sk_buff_head	txq;		/* tx urbs in flight */
	struct sk_buff_head	done;		/* completed urbs awaiting bh */
	struct sk_buff_head	rxq_pause;
	struct sk_buff_head	txq_pend;	/* tx skbs not yet submitted */

	struct tasklet_struct	bh;
	struct delayed_work	wq;

	int			msg_enable;

	struct urb		*urb_intr;	/* interrupt endpoint urb */
	struct usb_anchor	deferred;

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;		/* EVENT_* bits */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;
	struct timer_list	stat_monitor;	/* periodic stats refresh */

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;		/* ID_REV_CHIP_ID_78xx_ */
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;	/* flow control autonegotiation */
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};
404
405 /* define external phy id */
406 #define PHY_LAN8835                     (0x0007C130)
407 #define PHY_KSZ9031RNX                  (0x00221620)
408
/* use ethtool to change the level for any given device;
 * -1 leaves the driver's default message level in place.
 */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
413
414 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
415 {
416         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
417         int ret;
418
419         if (!buf)
420                 return -ENOMEM;
421
422         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
423                               USB_VENDOR_REQUEST_READ_REGISTER,
424                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
425                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
426         if (likely(ret >= 0)) {
427                 le32_to_cpus(buf);
428                 *data = *buf;
429         } else {
430                 netdev_warn(dev->net,
431                             "Failed to read register index 0x%08x. ret = %d",
432                             index, ret);
433         }
434
435         kfree(buf);
436
437         return ret;
438 }
439
440 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
441 {
442         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
443         int ret;
444
445         if (!buf)
446                 return -ENOMEM;
447
448         *buf = data;
449         cpu_to_le32s(buf);
450
451         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
452                               USB_VENDOR_REQUEST_WRITE_REGISTER,
453                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
454                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
455         if (unlikely(ret < 0)) {
456                 netdev_warn(dev->net,
457                             "Failed to write register index 0x%08x. ret = %d",
458                             index, ret);
459         }
460
461         kfree(buf);
462
463         return ret;
464 }
465
466 static int lan78xx_read_stats(struct lan78xx_net *dev,
467                               struct lan78xx_statstage *data)
468 {
469         int ret = 0;
470         int i;
471         struct lan78xx_statstage *stats;
472         u32 *src;
473         u32 *dst;
474
475         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
476         if (!stats)
477                 return -ENOMEM;
478
479         ret = usb_control_msg(dev->udev,
480                               usb_rcvctrlpipe(dev->udev, 0),
481                               USB_VENDOR_REQUEST_GET_STATS,
482                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
483                               0,
484                               0,
485                               (void *)stats,
486                               sizeof(*stats),
487                               USB_CTRL_SET_TIMEOUT);
488         if (likely(ret >= 0)) {
489                 src = (u32 *)stats;
490                 dst = (u32 *)data;
491                 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
492                         le32_to_cpus(&src[i]);
493                         dst[i] = src[i];
494                 }
495         } else {
496                 netdev_warn(dev->net,
497                             "Failed to read stat ret = %d", ret);
498         }
499
500         kfree(stats);
501
502         return ret;
503 }
504
/* Detect a 32-bit hardware counter wrap: if the freshly read value is
 * smaller than the previously saved snapshot, the counter rolled over
 * since the last read, so bump its rollover count.
 * Wrapped in do { } while (0) (instead of bare braces) so the macro is
 * safe in every statement position; arguments are parenthesized.
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
509
/* Compare every counter in *stats against the previously saved snapshot,
 * bumping dev->stats.rollover_count for each one that wrapped, then save
 * *stats as the new snapshot.  Called with dev->stats.access_lock held
 * (see lan78xx_update_stats()).
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	/* remember this read as the baseline for the next rollover check */
	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
563
/* Refresh dev->stats.curr_stat from hardware.
 *
 * Reads the raw 32-bit counters, records any rollovers against the last
 * snapshot, then extends every counter to 64 bits as
 *     curr = raw + rollover_count * (rollover_max + 1)
 * walking the stats structs as flat u32/u64 arrays — this relies on
 * lan78xx_statstage and lan78xx_statstage64 having identical field order.
 * rollover_max is populated elsewhere (not in this chunk).
 * Silently returns if the interface cannot be resumed for register access.
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	/* lan78xx_read_stats() returns bytes transferred (> 0) on success */
	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
591
592 /* Loop until the read is completed with timeout called with phy_mutex held */
593 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
594 {
595         unsigned long start_time = jiffies;
596         u32 val;
597         int ret;
598
599         do {
600                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
601                 if (unlikely(ret < 0))
602                         return -EIO;
603
604                 if (!(val & MII_ACC_MII_BUSY_))
605                         return 0;
606         } while (!time_after(jiffies, start_time + HZ));
607
608         return -EIO;
609 }
610
611 static inline u32 mii_access(int id, int index, int read)
612 {
613         u32 ret;
614
615         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
616         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
617         if (read)
618                 ret |= MII_ACC_MII_READ_;
619         else
620                 ret |= MII_ACC_MII_WRITE_;
621         ret |= MII_ACC_MII_BUSY_;
622
623         return ret;
624 }
625
626 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
627 {
628         unsigned long start_time = jiffies;
629         u32 val;
630         int ret;
631
632         do {
633                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
634                 if (unlikely(ret < 0))
635                         return -EIO;
636
637                 if (!(val & E2P_CMD_EPC_BUSY_) ||
638                     (val & E2P_CMD_EPC_TIMEOUT_))
639                         break;
640                 usleep_range(40, 100);
641         } while (!time_after(jiffies, start_time + HZ));
642
643         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
644                 netdev_warn(dev->net, "EEPROM read operation timeout");
645                 return -EIO;
646         }
647
648         return 0;
649 }
650
651 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
652 {
653         unsigned long start_time = jiffies;
654         u32 val;
655         int ret;
656
657         do {
658                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
659                 if (unlikely(ret < 0))
660                         return -EIO;
661
662                 if (!(val & E2P_CMD_EPC_BUSY_))
663                         return 0;
664
665                 usleep_range(40, 100);
666         } while (!time_after(jiffies, start_time + HZ));
667
668         netdev_warn(dev->net, "EEPROM is busy");
669         return -EIO;
670 }
671
672 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
673                                    u32 length, u8 *data)
674 {
675         u32 val;
676         u32 saved;
677         int i, ret;
678         int retval;
679
680         /* depends on chip, some EEPROM pins are muxed with LED function.
681          * disable & restore LED function to access EEPROM.
682          */
683         ret = lan78xx_read_reg(dev, HW_CFG, &val);
684         saved = val;
685         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
686                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
687                 ret = lan78xx_write_reg(dev, HW_CFG, val);
688         }
689
690         retval = lan78xx_eeprom_confirm_not_busy(dev);
691         if (retval)
692                 return retval;
693
694         for (i = 0; i < length; i++) {
695                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
696                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
697                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
698                 if (unlikely(ret < 0)) {
699                         retval = -EIO;
700                         goto exit;
701                 }
702
703                 retval = lan78xx_wait_eeprom(dev);
704                 if (retval < 0)
705                         goto exit;
706
707                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
708                 if (unlikely(ret < 0)) {
709                         retval = -EIO;
710                         goto exit;
711                 }
712
713                 data[i] = val & 0xFF;
714                 offset++;
715         }
716
717         retval = 0;
718 exit:
719         if (dev->chipid == ID_REV_CHIP_ID_7800_)
720                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
721
722         return retval;
723 }
724
725 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
726                                u32 length, u8 *data)
727 {
728         u8 sig;
729         int ret;
730
731         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
732         if ((ret == 0) && (sig == EEPROM_INDICATOR))
733                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
734         else
735                 ret = -EINVAL;
736
737         return ret;
738 }
739
740 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
741                                     u32 length, u8 *data)
742 {
743         u32 val;
744         u32 saved;
745         int i, ret;
746         int retval;
747
748         /* depends on chip, some EEPROM pins are muxed with LED function.
749          * disable & restore LED function to access EEPROM.
750          */
751         ret = lan78xx_read_reg(dev, HW_CFG, &val);
752         saved = val;
753         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
754                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
755                 ret = lan78xx_write_reg(dev, HW_CFG, val);
756         }
757
758         retval = lan78xx_eeprom_confirm_not_busy(dev);
759         if (retval)
760                 goto exit;
761
762         /* Issue write/erase enable command */
763         val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
764         ret = lan78xx_write_reg(dev, E2P_CMD, val);
765         if (unlikely(ret < 0)) {
766                 retval = -EIO;
767                 goto exit;
768         }
769
770         retval = lan78xx_wait_eeprom(dev);
771         if (retval < 0)
772                 goto exit;
773
774         for (i = 0; i < length; i++) {
775                 /* Fill data register */
776                 val = data[i];
777                 ret = lan78xx_write_reg(dev, E2P_DATA, val);
778                 if (ret < 0) {
779                         retval = -EIO;
780                         goto exit;
781                 }
782
783                 /* Send "write" command */
784                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
785                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
786                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
787                 if (ret < 0) {
788                         retval = -EIO;
789                         goto exit;
790                 }
791
792                 retval = lan78xx_wait_eeprom(dev);
793                 if (retval < 0)
794                         goto exit;
795
796                 offset++;
797         }
798
799         retval = 0;
800 exit:
801         if (dev->chipid == ID_REV_CHIP_ID_7800_)
802                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
803
804         return retval;
805 }
806
807 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
808                                 u32 length, u8 *data)
809 {
810         int i;
811         int ret;
812         u32 buf;
813         unsigned long timeout;
814
815         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
816
817         if (buf & OTP_PWR_DN_PWRDN_N_) {
818                 /* clear it and wait to be cleared */
819                 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
820
821                 timeout = jiffies + HZ;
822                 do {
823                         usleep_range(1, 10);
824                         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
825                         if (time_after(jiffies, timeout)) {
826                                 netdev_warn(dev->net,
827                                             "timeout on OTP_PWR_DN");
828                                 return -EIO;
829                         }
830                 } while (buf & OTP_PWR_DN_PWRDN_N_);
831         }
832
833         for (i = 0; i < length; i++) {
834                 ret = lan78xx_write_reg(dev, OTP_ADDR1,
835                                         ((offset + i) >> 8) & OTP_ADDR1_15_11);
836                 ret = lan78xx_write_reg(dev, OTP_ADDR2,
837                                         ((offset + i) & OTP_ADDR2_10_3));
838
839                 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
840                 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
841
842                 timeout = jiffies + HZ;
843                 do {
844                         udelay(1);
845                         ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
846                         if (time_after(jiffies, timeout)) {
847                                 netdev_warn(dev->net,
848                                             "timeout on OTP_STATUS");
849                                 return -EIO;
850                         }
851                 } while (buf & OTP_STATUS_BUSY_);
852
853                 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
854
855                 data[i] = (u8)(buf & 0xFF);
856         }
857
858         return 0;
859 }
860
861 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
862                                  u32 length, u8 *data)
863 {
864         int i;
865         int ret;
866         u32 buf;
867         unsigned long timeout;
868
869         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
870
871         if (buf & OTP_PWR_DN_PWRDN_N_) {
872                 /* clear it and wait to be cleared */
873                 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
874
875                 timeout = jiffies + HZ;
876                 do {
877                         udelay(1);
878                         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
879                         if (time_after(jiffies, timeout)) {
880                                 netdev_warn(dev->net,
881                                             "timeout on OTP_PWR_DN completion");
882                                 return -EIO;
883                         }
884                 } while (buf & OTP_PWR_DN_PWRDN_N_);
885         }
886
887         /* set to BYTE program mode */
888         ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
889
890         for (i = 0; i < length; i++) {
891                 ret = lan78xx_write_reg(dev, OTP_ADDR1,
892                                         ((offset + i) >> 8) & OTP_ADDR1_15_11);
893                 ret = lan78xx_write_reg(dev, OTP_ADDR2,
894                                         ((offset + i) & OTP_ADDR2_10_3));
895                 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
896                 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
897                 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
898
899                 timeout = jiffies + HZ;
900                 do {
901                         udelay(1);
902                         ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
903                         if (time_after(jiffies, timeout)) {
904                                 netdev_warn(dev->net,
905                                             "Timeout on OTP_STATUS completion");
906                                 return -EIO;
907                         }
908                 } while (buf & OTP_STATUS_BUSY_);
909         }
910
911         return 0;
912 }
913
914 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
915                             u32 length, u8 *data)
916 {
917         u8 sig;
918         int ret;
919
920         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
921
922         if (ret == 0) {
923                 if (sig == OTP_INDICATOR_2)
924                         offset += 0x100;
925                 else if (sig != OTP_INDICATOR_1)
926                         ret = -EINVAL;
927                 if (!ret)
928                         ret = lan78xx_read_raw_otp(dev, offset, length, data);
929         }
930
931         return ret;
932 }
933
934 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
935 {
936         int i, ret;
937
938         for (i = 0; i < 100; i++) {
939                 u32 dp_sel;
940
941                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
942                 if (unlikely(ret < 0))
943                         return -EIO;
944
945                 if (dp_sel & DP_SEL_DPRDY_)
946                         return 0;
947
948                 usleep_range(40, 100);
949         }
950
951         netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
952
953         return -EIO;
954 }
955
956 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
957                                   u32 addr, u32 length, u32 *buf)
958 {
959         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
960         u32 dp_sel;
961         int i, ret;
962
963         if (usb_autopm_get_interface(dev->intf) < 0)
964                         return 0;
965
966         mutex_lock(&pdata->dataport_mutex);
967
968         ret = lan78xx_dataport_wait_not_busy(dev);
969         if (ret < 0)
970                 goto done;
971
972         ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
973
974         dp_sel &= ~DP_SEL_RSEL_MASK_;
975         dp_sel |= ram_select;
976         ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
977
978         for (i = 0; i < length; i++) {
979                 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
980
981                 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
982
983                 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
984
985                 ret = lan78xx_dataport_wait_not_busy(dev);
986                 if (ret < 0)
987                         goto done;
988         }
989
990 done:
991         mutex_unlock(&pdata->dataport_mutex);
992         usb_autopm_put_interface(dev->intf);
993
994         return ret;
995 }
996
997 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
998                                     int index, u8 addr[ETH_ALEN])
999 {
1000         u32     temp;
1001
1002         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1003                 temp = addr[3];
1004                 temp = addr[2] | (temp << 8);
1005                 temp = addr[1] | (temp << 8);
1006                 temp = addr[0] | (temp << 8);
1007                 pdata->pfilter_table[index][1] = temp;
1008                 temp = addr[5];
1009                 temp = addr[4] | (temp << 8);
1010                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1011                 pdata->pfilter_table[index][0] = temp;
1012         }
1013 }
1014
1015 /* returns hash bit number for given MAC address */
1016 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1017 {
1018         return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1019 }
1020
1021 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1022 {
1023         struct lan78xx_priv *pdata =
1024                         container_of(param, struct lan78xx_priv, set_multicast);
1025         struct lan78xx_net *dev = pdata->dev;
1026         int i;
1027         int ret;
1028
1029         netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1030                   pdata->rfe_ctl);
1031
1032         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1033                                DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1034
1035         for (i = 1; i < NUM_OF_MAF; i++) {
1036                 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1037                 ret = lan78xx_write_reg(dev, MAF_LO(i),
1038                                         pdata->pfilter_table[i][1]);
1039                 ret = lan78xx_write_reg(dev, MAF_HI(i),
1040                                         pdata->pfilter_table[i][0]);
1041         }
1042
1043         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1044 }
1045
1046 static void lan78xx_set_multicast(struct net_device *netdev)
1047 {
1048         struct lan78xx_net *dev = netdev_priv(netdev);
1049         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1050         unsigned long flags;
1051         int i;
1052
1053         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1054
1055         pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1056                             RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1057
1058         for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1059                         pdata->mchash_table[i] = 0;
1060         /* pfilter_table[0] has own HW address */
1061         for (i = 1; i < NUM_OF_MAF; i++) {
1062                         pdata->pfilter_table[i][0] =
1063                         pdata->pfilter_table[i][1] = 0;
1064         }
1065
1066         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1067
1068         if (dev->net->flags & IFF_PROMISC) {
1069                 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1070                 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1071         } else {
1072                 if (dev->net->flags & IFF_ALLMULTI) {
1073                         netif_dbg(dev, drv, dev->net,
1074                                   "receive all multicast enabled");
1075                         pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1076                 }
1077         }
1078
1079         if (netdev_mc_count(dev->net)) {
1080                 struct netdev_hw_addr *ha;
1081                 int i;
1082
1083                 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1084
1085                 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1086
1087                 i = 1;
1088                 netdev_for_each_mc_addr(ha, netdev) {
1089                         /* set first 32 into Perfect Filter */
1090                         if (i < 33) {
1091                                 lan78xx_set_addr_filter(pdata, i, ha->addr);
1092                         } else {
1093                                 u32 bitnum = lan78xx_hash(ha->addr);
1094
1095                                 pdata->mchash_table[bitnum / 32] |=
1096                                                         (1 << (bitnum % 32));
1097                                 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1098                         }
1099                         i++;
1100                 }
1101         }
1102
1103         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1104
1105         /* defer register writes to a sleepable context */
1106         schedule_work(&pdata->set_multicast);
1107 }
1108
1109 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1110                                       u16 lcladv, u16 rmtadv)
1111 {
1112         u32 flow = 0, fct_flow = 0;
1113         int ret;
1114         u8 cap;
1115
1116         if (dev->fc_autoneg)
1117                 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1118         else
1119                 cap = dev->fc_request_control;
1120
1121         if (cap & FLOW_CTRL_TX)
1122                 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1123
1124         if (cap & FLOW_CTRL_RX)
1125                 flow |= FLOW_CR_RX_FCEN_;
1126
1127         if (dev->udev->speed == USB_SPEED_SUPER)
1128                 fct_flow = 0x817;
1129         else if (dev->udev->speed == USB_SPEED_HIGH)
1130                 fct_flow = 0x211;
1131
1132         netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1133                   (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1134                   (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1135
1136         ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1137
1138         /* threshold value should be set before enabling flow */
1139         ret = lan78xx_write_reg(dev, FLOW, flow);
1140
1141         return 0;
1142 }
1143
/* Handle a PHY link-state change: on link-down reset the MAC and stop
 * the statistics timer; on link-up tune USB link-power states, update
 * flow control from the negotiated advertisement, and restart stats
 * collection. Returns 0 or a negative errno.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	/* snapshot the current link state under the PHY lock */
	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		/* link just went down */
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		/* no point updating stats while the link is down */
		del_timer(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		/* link just came up */
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		/* on SuperSpeed, pick USB LPM states based on link speed */
		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		/* read local and link-partner advertisements for flow control */
		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		/* (re)arm the statistics timer */
		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		/* kick the bottom half to resume traffic processing */
		tasklet_schedule(&dev->bh);
	}

	return ret;
}
1224
1225 /* some work can't be done in tasklets, so we use keventd
1226  *
1227  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
1228  * but tasklet_schedule() doesn't.      hope the failure is rare.
1229  */
1230 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1231 {
1232         set_bit(work, &dev->flags);
1233         if (!schedule_delayed_work(&dev->wq, 0))
1234                 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1235 }
1236
1237 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1238 {
1239         u32 intdata;
1240
1241         if (urb->actual_length != 4) {
1242                 netdev_warn(dev->net,
1243                             "unexpected urb length %d", urb->actual_length);
1244                 return;
1245         }
1246
1247         memcpy(&intdata, urb->transfer_buffer, 4);
1248         le32_to_cpus(&intdata);
1249
1250         if (intdata & INT_ENP_PHY_INT) {
1251                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1252                 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1253
1254                 if (dev->domain_data.phyirq > 0)
1255                         generic_handle_irq(dev->domain_data.phyirq);
1256         } else
1257                 netdev_warn(dev->net,
1258                             "unexpected interrupt: 0x%08x\n", intdata);
1259 }
1260
1261 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1262 {
1263         return MAX_EEPROM_SIZE;
1264 }
1265
1266 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1267                                       struct ethtool_eeprom *ee, u8 *data)
1268 {
1269         struct lan78xx_net *dev = netdev_priv(netdev);
1270         int ret;
1271
1272         ret = usb_autopm_get_interface(dev->intf);
1273         if (ret)
1274                 return ret;
1275
1276         ee->magic = LAN78XX_EEPROM_MAGIC;
1277
1278         ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1279
1280         usb_autopm_put_interface(dev->intf);
1281
1282         return ret;
1283 }
1284
1285 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1286                                       struct ethtool_eeprom *ee, u8 *data)
1287 {
1288         struct lan78xx_net *dev = netdev_priv(netdev);
1289         int ret;
1290
1291         ret = usb_autopm_get_interface(dev->intf);
1292         if (ret)
1293                 return ret;
1294
1295         /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1296          * to load data from EEPROM
1297          */
1298         if (ee->magic == LAN78XX_EEPROM_MAGIC)
1299                 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1300         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1301                  (ee->offset == 0) &&
1302                  (ee->len == 512) &&
1303                  (data[0] == OTP_INDICATOR_1))
1304                 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1305
1306         usb_autopm_put_interface(dev->intf);
1307
1308         return ret;
1309 }
1310
1311 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1312                                 u8 *data)
1313 {
1314         if (stringset == ETH_SS_STATS)
1315                 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1316 }
1317
1318 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1319 {
1320         if (sset == ETH_SS_STATS)
1321                 return ARRAY_SIZE(lan78xx_gstrings);
1322         else
1323                 return -EOPNOTSUPP;
1324 }
1325
1326 static void lan78xx_get_stats(struct net_device *netdev,
1327                               struct ethtool_stats *stats, u64 *data)
1328 {
1329         struct lan78xx_net *dev = netdev_priv(netdev);
1330
1331         lan78xx_update_stats(dev);
1332
1333         mutex_lock(&dev->stats.access_lock);
1334         memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1335         mutex_unlock(&dev->stats.access_lock);
1336 }
1337
1338 static void lan78xx_get_wol(struct net_device *netdev,
1339                             struct ethtool_wolinfo *wol)
1340 {
1341         struct lan78xx_net *dev = netdev_priv(netdev);
1342         int ret;
1343         u32 buf;
1344         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1345
1346         if (usb_autopm_get_interface(dev->intf) < 0)
1347                         return;
1348
1349         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1350         if (unlikely(ret < 0)) {
1351                 wol->supported = 0;
1352                 wol->wolopts = 0;
1353         } else {
1354                 if (buf & USB_CFG_RMT_WKP_) {
1355                         wol->supported = WAKE_ALL;
1356                         wol->wolopts = pdata->wol;
1357                 } else {
1358                         wol->supported = 0;
1359                         wol->wolopts = 0;
1360                 }
1361         }
1362
1363         usb_autopm_put_interface(dev->intf);
1364 }
1365
1366 static int lan78xx_set_wol(struct net_device *netdev,
1367                            struct ethtool_wolinfo *wol)
1368 {
1369         struct lan78xx_net *dev = netdev_priv(netdev);
1370         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1371         int ret;
1372
1373         ret = usb_autopm_get_interface(dev->intf);
1374         if (ret < 0)
1375                 return ret;
1376
1377         if (wol->wolopts & ~WAKE_ALL)
1378                 return -EINVAL;
1379
1380         pdata->wol = wol->wolopts;
1381
1382         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1383
1384         phy_ethtool_set_wol(netdev->phydev, wol);
1385
1386         usb_autopm_put_interface(dev->intf);
1387
1388         return ret;
1389 }
1390
1391 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1392 {
1393         struct lan78xx_net *dev = netdev_priv(net);
1394         struct phy_device *phydev = net->phydev;
1395         int ret;
1396         u32 buf;
1397
1398         ret = usb_autopm_get_interface(dev->intf);
1399         if (ret < 0)
1400                 return ret;
1401
1402         ret = phy_ethtool_get_eee(phydev, edata);
1403         if (ret < 0)
1404                 goto exit;
1405
1406         ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1407         if (buf & MAC_CR_EEE_EN_) {
1408                 edata->eee_enabled = true;
1409                 edata->eee_active = !!(edata->advertised &
1410                                        edata->lp_advertised);
1411                 edata->tx_lpi_enabled = true;
1412                 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1413                 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1414                 edata->tx_lpi_timer = buf;
1415         } else {
1416                 edata->eee_enabled = false;
1417                 edata->eee_active = false;
1418                 edata->tx_lpi_enabled = false;
1419                 edata->tx_lpi_timer = 0;
1420         }
1421
1422         ret = 0;
1423 exit:
1424         usb_autopm_put_interface(dev->intf);
1425
1426         return ret;
1427 }
1428
1429 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1430 {
1431         struct lan78xx_net *dev = netdev_priv(net);
1432         int ret;
1433         u32 buf;
1434
1435         ret = usb_autopm_get_interface(dev->intf);
1436         if (ret < 0)
1437                 return ret;
1438
1439         if (edata->eee_enabled) {
1440                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1441                 buf |= MAC_CR_EEE_EN_;
1442                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1443
1444                 phy_ethtool_set_eee(net->phydev, edata);
1445
1446                 buf = (u32)edata->tx_lpi_timer;
1447                 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1448         } else {
1449                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1450                 buf &= ~MAC_CR_EEE_EN_;
1451                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1452         }
1453
1454         usb_autopm_put_interface(dev->intf);
1455
1456         return 0;
1457 }
1458
1459 static u32 lan78xx_get_link(struct net_device *net)
1460 {
1461         u32 link;
1462
1463         mutex_lock(&net->phydev->lock);
1464         phy_read_status(net->phydev);
1465         link = net->phydev->link;
1466         mutex_unlock(&net->phydev->lock);
1467
1468         return link;
1469 }
1470
1471 static void lan78xx_get_drvinfo(struct net_device *net,
1472                                 struct ethtool_drvinfo *info)
1473 {
1474         struct lan78xx_net *dev = netdev_priv(net);
1475
1476         strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1477         strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1478         usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1479 }
1480
1481 static u32 lan78xx_get_msglevel(struct net_device *net)
1482 {
1483         struct lan78xx_net *dev = netdev_priv(net);
1484
1485         return dev->msg_enable;
1486 }
1487
1488 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1489 {
1490         struct lan78xx_net *dev = netdev_priv(net);
1491
1492         dev->msg_enable = level;
1493 }
1494
1495 static int lan78xx_get_link_ksettings(struct net_device *net,
1496                                       struct ethtool_link_ksettings *cmd)
1497 {
1498         struct lan78xx_net *dev = netdev_priv(net);
1499         struct phy_device *phydev = net->phydev;
1500         int ret;
1501
1502         ret = usb_autopm_get_interface(dev->intf);
1503         if (ret < 0)
1504                 return ret;
1505
1506         phy_ethtool_ksettings_get(phydev, cmd);
1507
1508         usb_autopm_put_interface(dev->intf);
1509
1510         return ret;
1511 }
1512
/* ethtool: apply new link settings via the PHY. When autonegotiation
 * is being disabled, the link is briefly forced down (loopback pulse on
 * BMCR) so the partner re-trains at the newly forced speed/duplex.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down: toggle loopback in BMCR for ~1 ms so the
		 * link drops and comes back with the forced parameters
		 */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1540
1541 static void lan78xx_get_pause(struct net_device *net,
1542                               struct ethtool_pauseparam *pause)
1543 {
1544         struct lan78xx_net *dev = netdev_priv(net);
1545         struct phy_device *phydev = net->phydev;
1546         struct ethtool_link_ksettings ecmd;
1547
1548         phy_ethtool_ksettings_get(phydev, &ecmd);
1549
1550         pause->autoneg = dev->fc_autoneg;
1551
1552         if (dev->fc_request_control & FLOW_CTRL_TX)
1553                 pause->tx_pause = 1;
1554
1555         if (dev->fc_request_control & FLOW_CTRL_RX)
1556                 pause->rx_pause = 1;
1557 }
1558
1559 static int lan78xx_set_pause(struct net_device *net,
1560                              struct ethtool_pauseparam *pause)
1561 {
1562         struct lan78xx_net *dev = netdev_priv(net);
1563         struct phy_device *phydev = net->phydev;
1564         struct ethtool_link_ksettings ecmd;
1565         int ret;
1566
1567         phy_ethtool_ksettings_get(phydev, &ecmd);
1568
1569         if (pause->autoneg && !ecmd.base.autoneg) {
1570                 ret = -EINVAL;
1571                 goto exit;
1572         }
1573
1574         dev->fc_request_control = 0;
1575         if (pause->rx_pause)
1576                 dev->fc_request_control |= FLOW_CTRL_RX;
1577
1578         if (pause->tx_pause)
1579                 dev->fc_request_control |= FLOW_CTRL_TX;
1580
1581         if (ecmd.base.autoneg) {
1582                 u32 mii_adv;
1583                 u32 advertising;
1584
1585                 ethtool_convert_link_mode_to_legacy_u32(
1586                         &advertising, ecmd.link_modes.advertising);
1587
1588                 advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1589                 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1590                 advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1591
1592                 ethtool_convert_legacy_u32_to_link_mode(
1593                         ecmd.link_modes.advertising, advertising);
1594
1595                 phy_ethtool_ksettings_set(phydev, &ecmd);
1596         }
1597
1598         dev->fc_autoneg = pause->autoneg;
1599
1600         ret = 0;
1601 exit:
1602         return ret;
1603 }
1604
/* ethtool operation table wiring the handlers defined above into the
 * networking core.
 */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link       = lan78xx_get_link,
	.nway_reset     = phy_ethtool_nway_reset,
	.get_drvinfo    = lan78xx_get_drvinfo,
	.get_msglevel   = lan78xx_get_msglevel,
	.set_msglevel   = lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom     = lan78xx_ethtool_get_eeprom,
	.set_eeprom     = lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings    = lan78xx_get_strings,
	.get_wol        = lan78xx_get_wol,
	.set_wol        = lan78xx_set_wol,
	.get_eee        = lan78xx_get_eee,
	.set_eee        = lan78xx_set_eee,
	.get_pauseparam = lan78xx_get_pause,
	.set_pauseparam = lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
};
1626
1627 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1628 {
1629         if (!netif_running(netdev))
1630                 return -EINVAL;
1631
1632         return phy_mii_ioctl(netdev->phydev, rq, cmd);
1633 }
1634
/* Establish the device MAC address at init time. Tries, in order: the
 * address already programmed in RX_ADDRL/H, a platform/Device-Tree
 * address, EEPROM, OTP, and finally a random address. The chosen
 * address is written back to the MAC registers, installed in perfect
 * filter slot 0, and copied into the net_device.
 */
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[6];

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	/* unpack the little-endian register pair into bytes */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			random_ether_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		/* repack and program the chosen address into the MAC */
		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	/* install own address in perfect filter slot 0 */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}
1684
/* MDIO read and write wrappers for phylib */
/* Read PHY register @idx from PHY @phy_id over the chip's MII bridge.
 * Serialized by phy_mutex; the device is resumed for the access.
 * Returns the 16-bit register value or a negative errno.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	/* wait for the MII transaction to finish */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	/* return only the 16-bit PHY register contents */
	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1721
1722 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1723                                  u16 regval)
1724 {
1725         struct lan78xx_net *dev = bus->priv;
1726         u32 val, addr;
1727         int ret;
1728
1729         ret = usb_autopm_get_interface(dev->intf);
1730         if (ret < 0)
1731                 return ret;
1732
1733         mutex_lock(&dev->phy_mutex);
1734
1735         /* confirm MII not busy */
1736         ret = lan78xx_phy_wait_not_busy(dev);
1737         if (ret < 0)
1738                 goto done;
1739
1740         val = (u32)regval;
1741         ret = lan78xx_write_reg(dev, MII_DATA, val);
1742
1743         /* set the address, index & direction (write to PHY) */
1744         addr = mii_access(phy_id, idx, MII_WRITE);
1745         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1746
1747         ret = lan78xx_phy_wait_not_busy(dev);
1748         if (ret < 0)
1749                 goto done;
1750
1751 done:
1752         mutex_unlock(&dev->phy_mutex);
1753         usb_autopm_put_interface(dev->intf);
1754         return 0;
1755 }
1756
1757 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1758 {
1759         int ret;
1760
1761         dev->mdiobus = mdiobus_alloc();
1762         if (!dev->mdiobus) {
1763                 netdev_err(dev->net, "can't allocate MDIO bus\n");
1764                 return -ENOMEM;
1765         }
1766
1767         dev->mdiobus->priv = (void *)dev;
1768         dev->mdiobus->read = lan78xx_mdiobus_read;
1769         dev->mdiobus->write = lan78xx_mdiobus_write;
1770         dev->mdiobus->name = "lan78xx-mdiobus";
1771         dev->mdiobus->parent = &dev->udev->dev;
1772
1773         snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1774                  dev->udev->bus->busnum, dev->udev->devnum);
1775
1776         switch (dev->chipid) {
1777         case ID_REV_CHIP_ID_7800_:
1778         case ID_REV_CHIP_ID_7850_:
1779                 /* set to internal PHY id */
1780                 dev->mdiobus->phy_mask = ~(1 << 1);
1781                 break;
1782         case ID_REV_CHIP_ID_7801_:
1783                 /* scan thru PHYAD[2..0] */
1784                 dev->mdiobus->phy_mask = ~(0xFF);
1785                 break;
1786         }
1787
1788         ret = mdiobus_register(dev->mdiobus);
1789         if (ret) {
1790                 netdev_err(dev->net, "can't register MDIO bus\n");
1791                 goto exit1;
1792         }
1793
1794         netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1795         return 0;
1796 exit1:
1797         mdiobus_free(dev->mdiobus);
1798         return ret;
1799 }
1800
/* Tear down the MDIO bus created by lan78xx_mdio_init(). */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1806
/* phylib link-change callback (installed via phy_connect_direct()).
 *
 * Only acts in forced-100 mode, where it applies a speed-toggle
 * workaround; all other link states are handled by phylib itself.
 *
 * NOTE(review): phy_read() results are fed straight into bit operations
 * without checking for negative error returns — presumably acceptable
 * here, but worth confirming.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
1838
/* irq_domain .map callback: attach our irq_chip, flow handler and
 * per-device data to a newly created virtual IRQ.
 */
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}
1850
/* irq_domain .unmap callback: undo what irq_map() set on the virq. */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
1856
/* Domain ops for the chip's interrupt-endpoint IRQ domain. */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
1861
1862 static void lan78xx_irq_mask(struct irq_data *irqd)
1863 {
1864         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1865
1866         data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1867 }
1868
1869 static void lan78xx_irq_unmask(struct irq_data *irqd)
1870 {
1871         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1872
1873         data->irqenable |= BIT(irqd_to_hwirq(irqd));
1874 }
1875
/* irq_chip .irq_bus_lock: serialize cached-mask updates until the
 * matching irq_bus_sync_unlock() writes them to hardware.
 */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
1882
/* irq_chip .irq_bus_sync_unlock: flush the cached interrupt-enable mask
 * to INT_EP_CTL if it changed, then release the bus lock.
 *
 * NOTE(review): the lan78xx_read_reg() result is not checked; on a USB
 * failure 'buf' may be stale/uninitialized before the comparison —
 * confirm whether that is acceptable here.
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are the only two callbacks executed in non-atomic context.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}
1900
/* irq_chip for the device's interrupt endpoint: mask/unmask only touch
 * the cached mask; hardware is updated from irq_bus_sync_unlock().
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
1908
1909 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1910 {
1911         struct device_node *of_node;
1912         struct irq_domain *irqdomain;
1913         unsigned int irqmap = 0;
1914         u32 buf;
1915         int ret = 0;
1916
1917         of_node = dev->udev->dev.parent->of_node;
1918
1919         mutex_init(&dev->domain_data.irq_lock);
1920
1921         lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1922         dev->domain_data.irqenable = buf;
1923
1924         dev->domain_data.irqchip = &lan78xx_irqchip;
1925         dev->domain_data.irq_handler = handle_simple_irq;
1926
1927         irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1928                                           &chip_domain_ops, &dev->domain_data);
1929         if (irqdomain) {
1930                 /* create mapping for PHY interrupt */
1931                 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1932                 if (!irqmap) {
1933                         irq_domain_remove(irqdomain);
1934
1935                         irqdomain = NULL;
1936                         ret = -EINVAL;
1937                 }
1938         } else {
1939                 ret = -EINVAL;
1940         }
1941
1942         dev->domain_data.irqdomain = irqdomain;
1943         dev->domain_data.phyirq = irqmap;
1944
1945         return ret;
1946 }
1947
/* Undo lan78xx_setup_irq_domain(): dispose the PHY IRQ mapping, remove
 * the domain and clear the cached handles.
 */
static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
{
	if (dev->domain_data.phyirq > 0) {
		irq_dispose_mapping(dev->domain_data.phyirq);

		if (dev->domain_data.irqdomain)
			irq_domain_remove(dev->domain_data.irqdomain);
	}
	dev->domain_data.phyirq = 0;
	dev->domain_data.irqdomain = NULL;
}
1959
/* PHY fixup for an external LAN8835 (LAN7801 designs): route the shared
 * LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode and enable the MAC-side
 * RGMII TXC delay, recording RGMII-TXID as the interface mode.
 * Always returns 1 (phylib fixups only fail on negative returns).
 *
 * NOTE(review): register-write results in 'ret' are not checked.
 */
static int lan8835_fixup(struct phy_device *phydev)
{
	int buf;
	int ret;
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
	buf &= ~0x1800;
	buf |= 0x0800;
	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);

	/* RGMII MAC TXC Delay Enable */
	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
				MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);

	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;

	return 1;
}
1983
/* PHY fixup for an external Micrel KSZ9031RNX (LAN7801 designs):
 * program the RGMII pad-skew registers and record RGMII-RXID as the
 * interface mode.  Always returns 1 (phylib fixups only fail on
 * negative returns).
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
2000
2001 static int lan78xx_phy_init(struct lan78xx_net *dev)
2002 {
2003         int ret;
2004         u32 mii_adv;
2005         struct phy_device *phydev = dev->net->phydev;
2006
2007         phydev = phy_find_first(dev->mdiobus);
2008         if (!phydev) {
2009                 netdev_err(dev->net, "no PHY found\n");
2010                 return -EIO;
2011         }
2012
2013         if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
2014             (dev->chipid == ID_REV_CHIP_ID_7850_)) {
2015                 phydev->is_internal = true;
2016                 dev->interface = PHY_INTERFACE_MODE_GMII;
2017
2018         } else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2019                 if (!phydev->drv) {
2020                         netdev_err(dev->net, "no PHY driver found\n");
2021                         return -EIO;
2022                 }
2023
2024                 dev->interface = PHY_INTERFACE_MODE_RGMII;
2025
2026                 /* external PHY fixup for KSZ9031RNX */
2027                 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2028                                                  ksz9031rnx_fixup);
2029                 if (ret < 0) {
2030                         netdev_err(dev->net, "fail to register fixup\n");
2031                         return ret;
2032                 }
2033                 /* external PHY fixup for LAN8835 */
2034                 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2035                                                  lan8835_fixup);
2036                 if (ret < 0) {
2037                         netdev_err(dev->net, "fail to register fixup\n");
2038                         return ret;
2039                 }
2040                 /* add more external PHY fixup here if needed */
2041
2042                 phydev->is_internal = false;
2043         } else {
2044                 netdev_err(dev->net, "unknown ID found\n");
2045                 ret = -EIO;
2046                 goto error;
2047         }
2048
2049         /* if phyirq is not set, use polling mode in phylib */
2050         if (dev->domain_data.phyirq > 0)
2051                 phydev->irq = dev->domain_data.phyirq;
2052         else
2053                 phydev->irq = PHY_POLL;
2054         netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2055
2056         /* set to AUTOMDIX */
2057         phydev->mdix = ETH_TP_MDI_AUTO;
2058
2059         ret = phy_connect_direct(dev->net, phydev,
2060                                  lan78xx_link_status_change,
2061                                  dev->interface);
2062         if (ret) {
2063                 netdev_err(dev->net, "can't attach PHY to %s\n",
2064                            dev->mdiobus->id);
2065                 return -EIO;
2066         }
2067
2068         /* MAC doesn't support 1000T Half */
2069         phydev->supported &= ~SUPPORTED_1000baseT_Half;
2070
2071         /* support both flow controls */
2072         dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2073         phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2074         mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2075         phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2076
2077         genphy_config_aneg(phydev);
2078
2079         dev->fc_autoneg = phydev->autoneg;
2080
2081         return 0;
2082
2083 error:
2084         phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2085         phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
2086
2087         return ret;
2088 }
2089
/* Program the maximum RX frame length (size + 4 bytes for the FCS) into
 * MAC_RX, temporarily disabling the receiver if it was running so the
 * field is not changed under active reception.
 *
 * NOTE(review): always returns 0; register read/write failures stored
 * in 'ret' are not propagated.
 */
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
	int ret = 0;
	u32 buf;
	bool rxenabled;

	ret = lan78xx_read_reg(dev, MAC_RX, &buf);

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	if (rxenabled) {
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* re-enable the receiver if we turned it off above */
	if (rxenabled) {
		buf |= MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	return 0;
}
2118
/* Asynchronously unlink every URB queued on @q, returning how many
 * unlinks were successfully started.  The queue lock is dropped around
 * each usb_unlink_urb() call (which may complete synchronously), so the
 * walk restarts from the queue head each iteration and entries already
 * in unlink_start state are skipped.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* find the next entry not yet being unlinked */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2163
/* .ndo_change_mtu handler: reject MTUs whose link-layer size is an
 * exact multiple of the USB max packet size (would need an extra
 * zero-length packet), program the new max RX frame length, and grow
 * the RX URB size when needed — unlinking queued RX URBs so they are
 * resubmitted at the larger size.
 *
 * NOTE(review): the lan78xx_set_rx_max_frame_length() result in 'ret'
 * is never checked.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
2193
2194 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2195 {
2196         struct lan78xx_net *dev = netdev_priv(netdev);
2197         struct sockaddr *addr = p;
2198         u32 addr_lo, addr_hi;
2199         int ret;
2200
2201         if (netif_running(netdev))
2202                 return -EBUSY;
2203
2204         if (!is_valid_ether_addr(addr->sa_data))
2205                 return -EADDRNOTAVAIL;
2206
2207         ether_addr_copy(netdev->dev_addr, addr->sa_data);
2208
2209         addr_lo = netdev->dev_addr[0] |
2210                   netdev->dev_addr[1] << 8 |
2211                   netdev->dev_addr[2] << 16 |
2212                   netdev->dev_addr[3] << 24;
2213         addr_hi = netdev->dev_addr[4] |
2214                   netdev->dev_addr[5] << 8;
2215
2216         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2217         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2218
2219         /* Added to support MAC address changes */
2220         ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2221         ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2222
2223         return 0;
2224 }
2225
/* Enable or disable Rx checksum offload engine */
/* .ndo_set_features handler: update the cached RFE_CTL checksum-offload
 * and VLAN-filter bits under the spinlock, then flush the register
 * after dropping the lock (the USB register write cannot run in atomic
 * context).  Always returns 0; the write result is not checked.
 */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
	} else {
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	return 0;
}
2256
/* Work-queue handler: flush the cached VLAN filter table to dataport
 * RAM.  Scheduled from the vid add/kill handlers because the register
 * writes need a sleepable context.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
2266
2267 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2268                                    __be16 proto, u16 vid)
2269 {
2270         struct lan78xx_net *dev = netdev_priv(netdev);
2271         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2272         u16 vid_bit_index;
2273         u16 vid_dword_index;
2274
2275         vid_dword_index = (vid >> 5) & 0x7F;
2276         vid_bit_index = vid & 0x1F;
2277
2278         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2279
2280         /* defer register writes to a sleepable context */
2281         schedule_work(&pdata->set_vlan);
2282
2283         return 0;
2284 }
2285
2286 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2287                                     __be16 proto, u16 vid)
2288 {
2289         struct lan78xx_net *dev = netdev_priv(netdev);
2290         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2291         u16 vid_bit_index;
2292         u16 vid_dword_index;
2293
2294         vid_dword_index = (vid >> 5) & 0x7F;
2295         vid_bit_index = vid & 0x1F;
2296
2297         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2298
2299         /* defer register writes to a sleepable context */
2300         schedule_work(&pdata->set_vlan);
2301
2302         return 0;
2303 }
2304
/* Initialize the USB LTM (Latency Tolerance Messaging) registers.
 *
 * If LTM is enabled in USB_CFG1, attempt to load a 24-byte parameter
 * block from EEPROM (falling back to OTP); otherwise, or on any read
 * failure before the block is found, all six LTM registers are written
 * with zeros.
 * NOTE(review): EEPROM layout (offset 0x3F = length byte + pointer,
 * pointer scaled by 2) is inferred from the code — confirm against the
 * LAN78xx datasheet.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
2343
/* Soft-reset the chip and bring the MAC/FIFOs into an operational
 * state: Lite Reset, MAC address restore, chip ID capture, per-USB-speed
 * burst/queue sizing, FIFO thresholds, receive-filter defaults, PHY
 * reset and finally TX/RX enable.  Returns 0 on success or -EIO when a
 * reset/PHY-ready poll times out (1 second each).
 *
 * NOTE(review): most register-access return codes are assigned to 'ret'
 * but never checked.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;
	u8 sig;

	/* trigger a Lite Reset and poll for completion (up to 1s) */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	/* reset wiped the MAC address registers; reprogram them */
	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* size bulk-in burst and SW queue depths for the USB link speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* enable multiple ethernet frames per USB transfer */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear pending interrupts and disable flow control */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY and poll until it reports ready (up to 1s) */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable the MAC and FIFO transmit path */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable the MAC and FIFO receive path */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2481
/* Seed the statistics rollover thresholds and kick off the first
 * deferred statistics update.  Every counter defaults to the 20-bit
 * maximum (0xFFFFF); the byte-count and EEE LPI counters listed
 * explicitly below are full 32-bit.
 */
static void lan78xx_init_stats(struct lan78xx_net *dev)
{
	u32 *p;
	int i;

	/* initialize for stats update
	 * some counters are 20bits and some are 32bits
	 */
	p = (u32 *)&dev->stats.rollover_max;
	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
		p[i] = 0xFFFFF;

	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
2507
/* .ndo_open handler: autoresume the USB interface, start the PHY and
 * the interrupt URB (link events), seed statistics, and defer a link
 * reset.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): if the interrupt URB submission fails, the error is
 * returned but the PHY has already been started — confirm whether a
 * phy_stop() is expected on that path.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2546
/* lan78xx_terminate_urbs - unlink in-flight RX/TX URBs and wait for drain
 *
 * Installs an on-stack wait queue in dev->wait (completion paths wake
 * it), requests unlinking of everything on txq and rxq, then sleeps in
 * short intervals until the queues empty.  Called from lan78xx_stop()
 * while the device is quiescing.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	/* publish the wakeup target before unlinking starts */
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	/* NOTE(review): with &&, the loop exits as soon as ANY of the three
	 * queues is empty rather than when all are drained; this mirrors the
	 * historical usbnet_terminate_urbs() condition — confirm intent.
	 */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
2572
/* lan78xx_stop - ndo_stop callback: quiesce and shut down the interface
 *
 * Stops the statistics timer and the PHY, marks the device closed,
 * stops the TX queue, terminates outstanding bulk URBs, kills the
 * interrupt URB and silences all deferred work.  Always returns 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net		*dev = netdev_priv(net);

	/* stop periodic statistics updates before tearing anything down */
	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	if (net->phydev)
		phy_stop(net->phydev);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* unlink and wait out any in-flight bulk RX/TX URBs */
	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	/* release the autopm reference taken in lan78xx_open() */
	usb_autopm_put_interface(dev->intf);

	return 0;
}
2609
2610 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2611                                        struct sk_buff *skb, gfp_t flags)
2612 {
2613         u32 tx_cmd_a, tx_cmd_b;
2614
2615         if (skb_cow_head(skb, TX_OVERHEAD)) {
2616                 dev_kfree_skb_any(skb);
2617                 return NULL;
2618         }
2619
2620         if (skb_linearize(skb)) {
2621                 dev_kfree_skb_any(skb);
2622                 return NULL;
2623         }
2624
2625         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2626
2627         if (skb->ip_summed == CHECKSUM_PARTIAL)
2628                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2629
2630         tx_cmd_b = 0;
2631         if (skb_is_gso(skb)) {
2632                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2633
2634                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2635
2636                 tx_cmd_a |= TX_CMD_A_LSO_;
2637         }
2638
2639         if (skb_vlan_tag_present(skb)) {
2640                 tx_cmd_a |= TX_CMD_A_IVTG_;
2641                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2642         }
2643
2644         skb_push(skb, 4);
2645         cpu_to_le32s(&tx_cmd_b);
2646         memcpy(skb->data, &tx_cmd_b, 4);
2647
2648         skb_push(skb, 4);
2649         cpu_to_le32s(&tx_cmd_a);
2650         memcpy(skb->data, &tx_cmd_a, 4);
2651
2652         return skb;
2653 }
2654
/* defer_bh - move @skb from @list to dev->done and schedule the tasklet
 *
 * Records @state in the skb control block and returns the previous
 * state so completion handlers can detect a racing unlink.
 *
 * Locking: the IRQ flags saved when taking list->lock are deliberately
 * carried across to the dev->done.lock critical section — note the
 * asymmetric spin_lock_irqsave()/spin_unlock() and
 * spin_lock()/spin_unlock_irqrestore() pairing.  IRQs stay disabled for
 * the whole hand-off.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* only kick the tasklet on the empty->non-empty transition;
	 * it drains the whole queue once scheduled
	 */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2677
/* tx_complete - URB completion callback for bulk-out transfers
 *
 * Runs in interrupt context.  Updates TX statistics, reacts to the
 * USB error code (stall -> defer a halt-clear, hardware trouble ->
 * stop the queue), drops the async autopm reference taken at submit
 * time, then hands the skb to the bottom half via defer_bh().
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		/* entry->num_of_packet/length were set when the aggregate
		 * URB was built in lan78xx_tx_bh()
		 */
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled: worker will clear the halt */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			/* transient bus problems: stop feeding the device */
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	/* balances usb_autopm_get_interface_async() in lan78xx_tx_bh() */
	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2716
2717 static void lan78xx_queue_skb(struct sk_buff_head *list,
2718                               struct sk_buff *newsk, enum skb_state state)
2719 {
2720         struct skb_data *entry = (struct skb_data *)newsk->cb;
2721
2722         __skb_queue_tail(list, newsk);
2723         entry->state = state;
2724 }
2725
2726 static netdev_tx_t
2727 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2728 {
2729         struct lan78xx_net *dev = netdev_priv(net);
2730         struct sk_buff *skb2 = NULL;
2731
2732         if (skb) {
2733                 skb_tx_timestamp(skb);
2734                 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2735         }
2736
2737         if (skb2) {
2738                 skb_queue_tail(&dev->txq_pend, skb2);
2739
2740                 /* throttle TX patch at slower than SUPER SPEED USB */
2741                 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2742                     (skb_queue_len(&dev->txq_pend) > 10))
2743                         netif_stop_queue(net);
2744         } else {
2745                 netif_dbg(dev, tx_err, dev->net,
2746                           "lan78xx_tx_prep return NULL\n");
2747                 dev->net->stats.tx_errors++;
2748                 dev->net->stats.tx_dropped++;
2749         }
2750
2751         tasklet_schedule(&dev->bh);
2752
2753         return NETDEV_TX_OK;
2754 }
2755
/* lan78xx_bind - probe-time driver private setup
 *
 * Allocates the lan78xx_priv area (stashed behind dev->data[0]),
 * initializes its locks and deferred-work items, advertises netdev
 * offload features, creates the PHY interrupt domain, resets the chip
 * registers and initializes the MDIO bus.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * created so far is torn down again.
 */
static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
{
	struct lan78xx_priv *pdata = NULL;
	int ret;
	int i;

	/* private data is carried in an unsigned long slot of lan78xx_net */
	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);

	pdata = (struct lan78xx_priv *)(dev->data[0]);
	if (!pdata) {
		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
		return -ENOMEM;
	}

	pdata->dev = dev;

	spin_lock_init(&pdata->rfe_ctl_lock);
	mutex_init(&pdata->dataport_mutex);

	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);

	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
		pdata->vlan_table[i] = 0;

	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);

	dev->net->features = 0;

	if (DEFAULT_TX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_HW_CSUM;

	if (DEFAULT_RX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_RXCSUM;

	if (DEFAULT_TSO_CSUM_ENABLE)
		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;

	/* everything enabled by default is also user-toggleable */
	dev->net->hw_features = dev->net->features;

	ret = lan78xx_setup_irq_domain(dev);
	if (ret < 0) {
		netdev_warn(dev->net,
			    "lan78xx_setup_irq_domain() failed : %d", ret);
		goto out1;
	}

	/* reserve room for the 8-byte header lan78xx_tx_prep() prepends */
	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* Init all registers */
	ret = lan78xx_reset(dev);
	if (ret) {
		netdev_warn(dev->net, "Registers INIT FAILED....");
		goto out2;
	}

	ret = lan78xx_mdio_init(dev);
	if (ret) {
		netdev_warn(dev->net, "MDIO INIT FAILED.....");
		goto out2;
	}

	dev->net->flags |= IFF_MULTICAST;

	pdata->wol = WAKE_MAGIC;

	/* ret is 0 here on the success path */
	return ret;

out2:
	lan78xx_remove_irq_domain(dev);

out1:
	netdev_warn(dev->net, "Bind routine FAILED");
	cancel_work_sync(&pdata->set_multicast);
	cancel_work_sync(&pdata->set_vlan);
	/* NOTE(review): dev->data[0] still holds the freed pointer after
	 * this; the probe failure path does not call unbind, so it is not
	 * dereferenced — confirm against the caller.
	 */
	kfree(pdata);
	return ret;
}
2834
2835 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2836 {
2837         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2838
2839         lan78xx_remove_irq_domain(dev);
2840
2841         lan78xx_remove_mdio(dev);
2842
2843         if (pdata) {
2844                 cancel_work_sync(&pdata->set_multicast);
2845                 cancel_work_sync(&pdata->set_vlan);
2846                 netif_dbg(dev, ifdown, dev->net, "free pdata");
2847                 kfree(pdata);
2848                 pdata = NULL;
2849                 dev->data[0] = 0;
2850         }
2851 }
2852
2853 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2854                                     struct sk_buff *skb,
2855                                     u32 rx_cmd_a, u32 rx_cmd_b)
2856 {
2857         if (!(dev->net->features & NETIF_F_RXCSUM) ||
2858             unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2859                 skb->ip_summed = CHECKSUM_NONE;
2860         } else {
2861                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2862                 skb->ip_summed = CHECKSUM_COMPLETE;
2863         }
2864 }
2865
2866 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2867 {
2868         int             status;
2869
2870         if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2871                 skb_queue_tail(&dev->rxq_pause, skb);
2872                 return;
2873         }
2874
2875         dev->net->stats.rx_packets++;
2876         dev->net->stats.rx_bytes += skb->len;
2877
2878         skb->protocol = eth_type_trans(skb, dev->net);
2879
2880         netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2881                   skb->len + sizeof(struct ethhdr), skb->protocol);
2882         memset(skb->cb, 0, sizeof(struct skb_data));
2883
2884         if (skb_defer_rx_timestamp(skb))
2885                 return;
2886
2887         status = netif_rx(skb);
2888         if (status != NET_RX_SUCCESS)
2889                 netif_dbg(dev, rx_err, dev->net,
2890                           "netif_rx status %d\n", status);
2891 }
2892
/* lan78xx_rx - split one bulk-in buffer into individual Ethernet frames
 *
 * Each frame in the URB buffer is preceded by a 10-byte little-endian
 * command header (rx_cmd_a/b/c) and padded so the following header is
 * 32-bit aligned.  Intermediate frames are cloned out of @skb and
 * delivered via lan78xx_skb_return(); the final frame reuses @skb
 * itself (the caller delivers it when skb->len != 0).
 *
 * Returns 1 on success, 0 if the buffer is too short or a clone
 * allocation fails (caller counts an rx_error).
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* peel the three little-endian command words off the front */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		/* pad so the next RX command header is 4-byte aligned */
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* hardware flagged a receive error: skip this frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			/* clone shares the data buffer; narrow the clone's
			 * view to just this frame
			 */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
2964
2965 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2966 {
2967         if (!lan78xx_rx(dev, skb)) {
2968                 dev->net->stats.rx_errors++;
2969                 goto done;
2970         }
2971
2972         if (skb->len) {
2973                 lan78xx_skb_return(dev, skb);
2974                 return;
2975         }
2976
2977         netif_dbg(dev, rx_err, dev->net, "drop\n");
2978         dev->net->stats.rx_errors++;
2979 done:
2980         skb_queue_tail(&dev->done, skb);
2981 }
2982
2983 static void rx_complete(struct urb *urb);
2984
/* rx_submit - allocate an RX skb and submit @urb for a bulk-in transfer
 *
 * Takes ownership of @urb: on any failure both the urb and the freshly
 * allocated skb are released.  Submission is skipped (returning
 * -ENOLINK) while the device is detached, not running, halted or
 * asleep.
 *
 * Returns 0 on successful submission or a negative errno.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	/* driver bookkeeping rides in the skb control block */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	/* rxq.lock also serializes the device-state checks below against
	 * teardown paths
	 */
	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			/* in flight: track it on rxq until completion */
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* endpoint stalled: worker will clear the halt */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* let the bottom half retry the refill */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		/* submission failed: skb and urb are ours to free */
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
3044
/* rx_complete - URB completion callback for bulk-in transfers
 *
 * Runs in interrupt context.  Classifies the completion status into an
 * skb state (rx_done for good data, rx_cleanup for errors), queues the
 * skb to the bottom half via defer_bh(), and — when the error was not
 * fatal — immediately resubmits the urb for the next receive.
 *
 * Ownership: on fatal errors the urb is parked back in entry->urb (the
 * bottom half frees it); setting the local urb pointer to NULL marks
 * that transfer of ownership.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* runt buffer: count it and let the bh clean it up */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* hand the urb to the bottom half for freeing */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		/* resubmit unless shutting down, halted, or an unlink
		 * raced with this completion (old state == unlink_start)
		 */
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
3114
/* lan78xx_tx_bh - TX bottom half: aggregate pending frames into one URB
 *
 * Drains txq_pend, copying as many queued frames as fit into a single
 * MAX_SINGLE_PACKET_SIZE buffer (each frame already carries its TX
 * command header and is padded to a 4-byte boundary).  A GSO frame is
 * never aggregated: it is sent alone, and if one is found behind
 * already-collected frames those are flushed first.  The resulting
 * buffer is submitted as one bulk-out URB; while the device is asleep
 * the URB is anchored on dev->deferred for resume to submit.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	count = 0;
	length = 0;
	spin_lock_irqsave(&tqp->lock, flags);
	/* walk the pending queue to decide how many frames to batch */
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			/* GSO frame goes out alone, without copying */
			count = 1;
			length = skb->len - TX_OVERHEAD;
			__skb_unlink(skb, tqp);
			spin_unlock_irqrestore(&tqp->lock, flags);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* each frame starts 4-byte aligned in the merged buffer */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}
	spin_unlock_irqrestore(&tqp->lock, flags);

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			/* length counts payload only, not command headers */
			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto drop;

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	/* reference dropped in tx_complete() (or on submit failure below) */
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		netif_trans_update(dev->net);
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		/* NOTE(review): skb is dereferenced here after a successful
		 * submit; tx_complete() could in principle have freed it
		 * already — confirm whether this debug read can race.
		 */
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
3242
3243 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3244 {
3245         struct urb *urb;
3246         int i;
3247
3248         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3249                 for (i = 0; i < 10; i++) {
3250                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3251                                 break;
3252                         urb = usb_alloc_urb(0, GFP_ATOMIC);
3253                         if (urb)
3254                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3255                                         return;
3256                 }
3257
3258                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3259                         tasklet_schedule(&dev->bh);
3260         }
3261         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3262                 netif_wake_queue(dev->net);
3263 }
3264
/* lan78xx_bh - main bottom-half tasklet
 *
 * Drains dev->done: completed RX buffers are parsed and delivered,
 * finished TX buffers and cleanup entries have their urb/skb freed.
 * Afterwards (if the device is up) it resets the statistics timer
 * cadence, flushes pending TX and replenishes RX URBs.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			/* pre-mark for cleanup: rx_process() requeues the
			 * skb to dev->done on parse failure
			 */
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* NOTE(review): an unexpected state aborts the whole
			 * drain and the dequeued skb is not freed here —
			 * confirm this is unreachable in practice.
			 */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
3308
/* lan78xx_delayedwork - kevent worker for deferred, sleepable actions
 *
 * Handles events flagged by interrupt-context code via
 * lan78xx_defer_kevent(): clearing TX/RX endpoint halts, performing a
 * link reset, and reading the hardware statistics counters.
 */
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt(dev->udev, dev->pipe_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
/* NOTE(review): the goto above jumps INTO this if-body, skipping
 * usb_clear_halt() and usb_autopm_put_interface(); EVENT_TX_HALT then
 * stays set and the else branch below is skipped — confirm intended.
 */
fail_pipe:
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
				goto fail_halt;
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
/* NOTE(review): same goto-into-branch pattern as fail_pipe above. */
fail_halt:
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		int ret = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (lan78xx_link_reset(dev) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			/* NOTE(review): ret is never assigned, so this always
			 * logs 0 rather than the actual error code.
			 */
			netdev_info(dev->net, "link reset failed (%d)\n",
				    ret);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}

	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
		lan78xx_update_stats(dev);

		clear_bit(EVENT_STAT_UPDATE, &dev->flags);

		/* back off the statistics polling interval (capped at 50x) */
		mod_timer(&dev->stat_monitor,
			  jiffies + (STAT_UPDATE_TIMER * dev->delta));

		dev->delta = min((dev->delta * 2), 50);
	}
}
3386
3387 static void intr_complete(struct urb *urb)
3388 {
3389         struct lan78xx_net *dev = urb->context;
3390         int status = urb->status;
3391
3392         switch (status) {
3393         /* success */
3394         case 0:
3395                 lan78xx_status(dev, urb);
3396                 break;
3397
3398         /* software-driven interface shutdown */
3399         case -ENOENT:                   /* urb killed */
3400         case -ESHUTDOWN:                /* hardware gone */
3401                 netif_dbg(dev, ifdown, dev->net,
3402                           "intr shutdown, code %d\n", status);
3403                 return;
3404
3405         /* NOTE:  not throttling like RX/TX, since this endpoint
3406          * already polls infrequently
3407          */
3408         default:
3409                 netdev_dbg(dev->net, "intr status %d\n", status);
3410                 break;
3411         }
3412
3413         if (!netif_running(dev->net))
3414                 return;
3415
3416         memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3417         status = usb_submit_urb(urb, GFP_ATOMIC);
3418         if (status != 0)
3419                 netif_err(dev, timer, dev->net,
3420                           "intr resubmit --> %d\n", status);
3421 }
3422
3423 static void lan78xx_disconnect(struct usb_interface *intf)
3424 {
3425         struct lan78xx_net              *dev;
3426         struct usb_device               *udev;
3427         struct net_device               *net;
3428
3429         dev = usb_get_intfdata(intf);
3430         usb_set_intfdata(intf, NULL);
3431         if (!dev)
3432                 return;
3433
3434         udev = interface_to_usbdev(intf);
3435         net = dev->net;
3436
3437         phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3438         phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3439
3440         phy_disconnect(net->phydev);
3441
3442         unregister_netdev(net);
3443
3444         cancel_delayed_work_sync(&dev->wq);
3445
3446         usb_scuttle_anchored_urbs(&dev->deferred);
3447
3448         lan78xx_unbind(dev, intf);
3449
3450         usb_kill_urb(dev->urb_intr);
3451         usb_free_urb(dev->urb_intr);
3452
3453         free_netdev(net);
3454         usb_put_dev(udev);
3455 }
3456
/* ndo_tx_timeout handler: the netdev watchdog saw no TX progress for
 * TX_TIMEOUT_JIFFIES.  Unlink all in-flight TX URBs and schedule the
 * bottom-half tasklet so transmission can be retried.
 */
static void lan78xx_tx_timeout(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}
3464
3465 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3466                                                 struct net_device *netdev,
3467                                                 netdev_features_t features)
3468 {
3469         if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3470                 features &= ~NETIF_F_GSO_MASK;
3471
3472         features = vlan_features_check(skb, features);
3473         features = vxlan_features_check(skb, features);
3474
3475         return features;
3476 }
3477
/* net_device operations for the LAN78xx family; installed on the netdev
 * in lan78xx_probe().
 */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
3493
3494 static void lan78xx_stat_monitor(unsigned long param)
3495 {
3496         struct lan78xx_net *dev;
3497
3498         dev = (struct lan78xx_net *)param;
3499
3500         lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3501 }
3502
/* USB probe: allocate and initialize the netdev, validate the three
 * expected endpoints (bulk-in, bulk-out, interrupt-in), bind the
 * device, set up the interrupt status URB, init the PHY and register
 * the network device.  Unwinds through the out1..out5 labels on error.
 */
static int lan78xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
	struct lan78xx_net *dev;
	struct net_device *netdev;
	struct usb_device *udev;
	int ret;
	unsigned maxp;
	unsigned period;
	u8 *buf = NULL;

	/* Take a reference on the USB device; dropped at out1/disconnect. */
	udev = interface_to_usbdev(intf);
	udev = usb_get_dev(udev);

	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		ret = -ENOMEM;
		goto out1;
	}

	/* netdev_printk() needs this */
	SET_NETDEV_DEV(netdev, &intf->dev);

	dev = netdev_priv(netdev);
	dev->udev = udev;
	dev->intf = intf;
	dev->net = netdev;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
					| NETIF_MSG_PROBE | NETIF_MSG_LINK);

	/* SKB queues used by the RX/TX bottom half. */
	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->done);
	skb_queue_head_init(&dev->rxq_pause);
	skb_queue_head_init(&dev->txq_pend);
	mutex_init(&dev->phy_mutex);

	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
	init_usb_anchor(&dev->deferred);

	netdev->netdev_ops = &lan78xx_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	netdev->ethtool_ops = &lan78xx_ethtool_ops;

	/* Statistics timer; delta scales the polling interval. */
	dev->stat_monitor.function = lan78xx_stat_monitor;
	dev->stat_monitor.data = (unsigned long)dev;
	dev->delta = 1;
	init_timer(&dev->stat_monitor);

	mutex_init(&dev->stats.access_lock);

	/* The device must expose bulk-in, bulk-out and interrupt-in. */
	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	ep_intr = &intf->cur_altsetting->endpoint[2];
	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_intr = usb_rcvintpipe(dev->udev,
					usb_endpoint_num(&ep_intr->desc));

	ret = lan78xx_bind(dev, intf);
	if (ret < 0)
		goto out2;
	strcpy(netdev->name, "eth%d");

	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;

	/* MTU range: 68 - 9000 */
	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
	netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);

	/* Interrupt status URB; buffer ownership passes to the URB via
	 * URB_FREE_BUFFER.
	 * NOTE(review): if this kmalloc fails, probe silently continues
	 * with dev->urb_intr == NULL (no status polling) — confirm this
	 * degraded mode is intentional rather than returning -ENOMEM.
	 */
	period = ep_intr->desc.bInterval;
	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
	buf = kmalloc(maxp, GFP_KERNEL);
	if (buf) {
		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
		if (!dev->urb_intr) {
			ret = -ENOMEM;
			kfree(buf);
			goto out3;
		} else {
			usb_fill_int_urb(dev->urb_intr, dev->udev,
					 dev->pipe_intr, buf, maxp,
					 intr_complete, dev, period);
			dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
		}
	}

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);

	/* Reject broken descriptors. */
	if (dev->maxpacket == 0) {
		ret = -ENODEV;
		goto out4;
	}

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto out4;

	ret = register_netdev(netdev);
	if (ret != 0) {
		netif_err(dev, probe, netdev, "couldn't register the device\n");
		goto out5;
	}

	usb_set_intfdata(intf, dev);

	ret = device_set_wakeup_enable(&udev->dev, true);

	 /* Default delay of 2sec has more overhead than advantage.
	  * Set to 10sec as default.
	  */
	pm_runtime_set_autosuspend_delay(&udev->dev,
					 DEFAULT_AUTOSUSPEND_DELAY);

	return 0;

	/* Error unwind: each label undoes everything set up above it. */
out5:
	phy_disconnect(netdev->phydev);
out4:
	usb_free_urb(dev->urb_intr);
out3:
	lan78xx_unbind(dev, intf);
out2:
	free_netdev(netdev);
out1:
	usb_put_dev(udev);

	return ret;
}
3660
3661 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3662 {
3663         const u16 crc16poly = 0x8005;
3664         int i;
3665         u16 bit, crc, msb;
3666         u8 data;
3667
3668         crc = 0xFFFF;
3669         for (i = 0; i < len; i++) {
3670                 data = *buf++;
3671                 for (bit = 0; bit < 8; bit++) {
3672                         msb = crc >> 15;
3673                         crc <<= 1;
3674
3675                         if (msb ^ (u16)(data & 1)) {
3676                                 crc ^= crc16poly;
3677                                 crc |= (u16)0x0001U;
3678                         }
3679                         data >>= 1;
3680                 }
3681         }
3682
3683         return crc;
3684 }
3685
/* Program wake-on-LAN triggers for system suspend.  @wol is the ethtool
 * WAKE_* bitmask chosen by the user.  Stops the MAC, clears stale wake
 * state, installs one wakeup-frame filter slot per frame-matching
 * trigger (multicast/ARP), sets the corresponding WUCSR/PMT_CTL bits,
 * then re-enables the receiver so wake packets can be detected.
 * NOTE(review): ret is overwritten on every register access and the
 * function always returns 0 — register I/O failures are not reported.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* Match patterns: leading bytes of IPv4/IPv6 multicast MAC
	 * addresses and the ARP EtherType (0x0806).
	 */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* Stop TX/RX while the wake filters are reprogrammed. */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* Clear wake control/status and any latched wake sources. */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* Invalidate every wakeup-frame filter slot before rebuilding. */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* Byte mask 0x7: compare the first three bytes only. */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* Byte mask 0x3: compare the first two bytes only. */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* Byte mask 0x3000: compare bytes 12-13 (EtherType). */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* Re-enable the receiver so wake packets can be seen. */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3828
/* USB suspend callback, used for both autosuspend (selective suspend)
 * and system suspend.  On the first suspend, quiesces the MAC and all
 * URBs; refuses autosuspend with -EBUSY while TX is pending.  Then
 * programs wake state: good-frame wakeup for autosuspend, or the
 * user-selected WoL mask via lan78xx_set_suspend() for system suspend.
 * NOTE(review): 'event' is captured but never used, and 'ret' from the
 * register accessors is overwritten without being checked.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
			PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* Statistics polling stops while asleep. */
		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* Clear any latched wakeup status (WUPS is W1C). */
			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* Re-enable RX so wake frames can be detected. */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
3924
/* USB resume callback.  Restarts the statistics timer, and on the last
 * nested resume re-submits the interrupt URB and any TX URBs that were
 * deferred while asleep, then clears the wake configuration and
 * re-enables the transmitter.
 * NOTE(review): the usb_submit_urb() result for urb_intr and the
 * register-write results are ignored; the function always returns 0.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!timer_pending(&dev->stat_monitor)) {
		dev->delta = 1;
		mod_timer(&dev->stat_monitor,
			  jiffies + STAT_UPDATE_TIMER);
	}

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
				usb_submit_urb(dev->urb_intr, GFP_NOIO);

		/* Re-submit TX URBs deferred while the device slept; on
		 * failure drop the skb and release the autopm reference
		 * the deferring path took.
		 */
		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* Clear wake configuration and acknowledge wake reasons. */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* Re-enable the transmitter. */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
3991
/* Resume after a USB bus reset: the chip lost its register state, so
 * re-initialize it with lan78xx_reset() and restart the PHY before
 * running the normal resume path.
 */
static int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);

	phy_start(dev->net->phydev);

	return lan78xx_resume(intf);
}
4002
/* USB vendor/product IDs this driver binds to (Microchip LAN78xx). */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
4019
/* USB driver glue: probe/disconnect plus power management entry points.
 * supports_autosuspend enables runtime PM; disable_hub_initiated_lpm
 * keeps the hub from putting the link into LPM states on its own.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");