GNU Linux-libre 4.19.245-gnu1
[releases.git] / drivers / net / usb / lan78xx.c
1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <linux/phy.h>
33 #include <net/ip6_checksum.h>
34 #include <net/vxlan.h>
35 #include <linux/interrupt.h>
36 #include <linux/irqdomain.h>
37 #include <linux/irq.h>
38 #include <linux/irqchip/chained_irq.h>
39 #include <linux/microchipphy.h>
40 #include <linux/phy_fixed.h>
41 #include <linux/of_mdio.h>
42 #include <linux/of_net.h>
43 #include "lan78xx.h"
44
45 #define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
46 #define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
47 #define DRIVER_NAME     "lan78xx"
48
49 #define TX_TIMEOUT_JIFFIES              (5 * HZ)
50 #define THROTTLE_JIFFIES                (HZ / 8)
51 #define UNLINK_TIMEOUT_MS               3
52
53 #define RX_MAX_QUEUE_MEMORY             (60 * 1518)
54
55 #define SS_USB_PKT_SIZE                 (1024)
56 #define HS_USB_PKT_SIZE                 (512)
57 #define FS_USB_PKT_SIZE                 (64)
58
59 #define MAX_RX_FIFO_SIZE                (12 * 1024)
60 #define MAX_TX_FIFO_SIZE                (12 * 1024)
61 #define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
62 #define DEFAULT_BULK_IN_DELAY           (0x0800)
63 #define MAX_SINGLE_PACKET_SIZE          (9000)
64 #define DEFAULT_TX_CSUM_ENABLE          (true)
65 #define DEFAULT_RX_CSUM_ENABLE          (true)
66 #define DEFAULT_TSO_CSUM_ENABLE         (true)
67 #define DEFAULT_VLAN_FILTER_ENABLE      (true)
68 #define DEFAULT_VLAN_RX_OFFLOAD         (true)
69 #define TX_OVERHEAD                     (8)
70 #define RXW_PADDING                     2
71
72 #define LAN78XX_USB_VENDOR_ID           (0x0424)
73 #define LAN7800_USB_PRODUCT_ID          (0x7800)
74 #define LAN7850_USB_PRODUCT_ID          (0x7850)
75 #define LAN7801_USB_PRODUCT_ID          (0x7801)
76 #define LAN78XX_EEPROM_MAGIC            (0x78A5)
77 #define LAN78XX_OTP_MAGIC               (0x78F3)
78 #define AT29M2AF_USB_VENDOR_ID          (0x07C9)
79 #define AT29M2AF_USB_PRODUCT_ID (0x0012)
80
81 #define MII_READ                        1
82 #define MII_WRITE                       0
83
84 #define EEPROM_INDICATOR                (0xA5)
85 #define EEPROM_MAC_OFFSET               (0x01)
86 #define MAX_EEPROM_SIZE                 512
87 #define OTP_INDICATOR_1                 (0xF3)
88 #define OTP_INDICATOR_2                 (0xF7)
89
90 #define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
91                                          WAKE_MCAST | WAKE_BCAST | \
92                                          WAKE_ARP | WAKE_MAGIC)
93
94 /* USB related defines */
95 #define BULK_IN_PIPE                    1
96 #define BULK_OUT_PIPE                   2
97
98 /* default autosuspend delay (mSec)*/
99 #define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
100
101 /* statistic update interval (mSec) */
102 #define STAT_UPDATE_TIMER               (1 * 1000)
103
104 /* defines interrupts from interrupt EP */
105 #define MAX_INT_EP                      (32)
106 #define INT_EP_INTEP                    (31)
107 #define INT_EP_OTP_WR_DONE              (28)
108 #define INT_EP_EEE_TX_LPI_START         (26)
109 #define INT_EP_EEE_TX_LPI_STOP          (25)
110 #define INT_EP_EEE_RX_LPI               (24)
111 #define INT_EP_MAC_RESET_TIMEOUT        (23)
112 #define INT_EP_RDFO                     (22)
113 #define INT_EP_TXE                      (21)
114 #define INT_EP_USB_STATUS               (20)
115 #define INT_EP_TX_DIS                   (19)
116 #define INT_EP_RX_DIS                   (18)
117 #define INT_EP_PHY                      (17)
118 #define INT_EP_DP                       (16)
119 #define INT_EP_MAC_ERR                  (15)
120 #define INT_EP_TDFU                     (14)
121 #define INT_EP_TDFO                     (13)
122 #define INT_EP_UTX                      (12)
123 #define INT_EP_GPIO_11                  (11)
124 #define INT_EP_GPIO_10                  (10)
125 #define INT_EP_GPIO_9                   (9)
126 #define INT_EP_GPIO_8                   (8)
127 #define INT_EP_GPIO_7                   (7)
128 #define INT_EP_GPIO_6                   (6)
129 #define INT_EP_GPIO_5                   (5)
130 #define INT_EP_GPIO_4                   (4)
131 #define INT_EP_GPIO_3                   (3)
132 #define INT_EP_GPIO_2                   (2)
133 #define INT_EP_GPIO_1                   (1)
134 #define INT_EP_GPIO_0                   (0)
135
/* ethtool statistics names, one string per counter, in the same order as
 * the u32 members of struct lan78xx_statstage (and the u64 members of
 * struct lan78xx_statstage64).  Do not reorder or rename: userspace
 * tooling matches these strings, and the order is what ties each string
 * to its counter.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
185
/* Raw 32-bit hardware statistics block as returned by the
 * USB_VENDOR_REQUEST_GET_STATS control request (see lan78xx_read_stats).
 * The whole struct is read from the device as one contiguous
 * little-endian blob and walked as a flat u32 array, so the member
 * order must stay in lockstep with lan78xx_gstrings and
 * struct lan78xx_statstage64.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
235
/* 64-bit accumulated counterpart of struct lan78xx_statstage.  Member
 * order must match lan78xx_statstage exactly: lan78xx_update_stats()
 * walks both structs as flat u32/u64 arrays and folds each raw 32-bit
 * hardware counter plus its rollover count into the matching u64 total.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
285
/* Register addresses read back as a group — presumably the set dumped by
 * the ethtool get_regs callback; confirm against lan78xx_get_regs()
 * elsewhere in this file.
 */
static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};
307
308 #define PHY_REG_SIZE (32 * sizeof(u32))
309
310 struct lan78xx_net;
311
/* Driver-private state hung off lan78xx_net::driver_priv: receive-filter
 * shadow registers, filter/VLAN tables and the deferred work used to
 * program them.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};
324
/* Lifecycle tag for an skb/URB pair, stored in skb_data::state (which
 * lives in skb->cb) so the completion and unlink paths can tell what
 * stage a buffer is in.
 */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
334
/* Per-skb bookkeeping overlaid on skb->cb: the URB in flight for this
 * buffer, its owning device, the lifecycle state and sizes used for
 * accounting on completion.
 */
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};
342
/* Context carried by an asynchronous control URB: the setup packet plus
 * a backpointer to the device for the completion handler.
 */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
347
348 #define EVENT_TX_HALT                   0
349 #define EVENT_RX_HALT                   1
350 #define EVENT_RX_MEMORY                 2
351 #define EVENT_STS_SPLIT                 3
352 #define EVENT_LINK_RESET                4
353 #define EVENT_RX_PAUSED                 5
354 #define EVENT_DEV_WAKING                6
355 #define EVENT_DEV_ASLEEP                7
356 #define EVENT_DEV_OPEN                  8
357 #define EVENT_STAT_UPDATE               9
358
/* Statistics aggregation state: the last raw hardware snapshot (saved),
 * per-counter rollover bookkeeping (rollover_count / rollover_max) and
 * the running 64-bit totals (curr_stat), all serialized by access_lock.
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};
366
/* State for the driver's chained interrupt domain that demultiplexes the
 * device interrupt endpoint: the domain itself, the mapped PHY IRQ, the
 * irq_chip/handler pair and a shadow of the enabled-source mask.
 */
struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;
	struct mutex		irq_lock;		/* for irq bus access */
};
375
/* Per-adapter driver state for one LAN78xx USB Ethernet device. */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;	/* struct lan78xx_priv */

	/* queue depths and the skb queues moving buffers between the
	 * URB completion handlers and the bottom-half tasklet
	 */
	int			rx_qlen;
	int			tx_qlen;
	struct sk_buff_head	rxq;
	struct sk_buff_head	txq;
	struct sk_buff_head	done;
	struct sk_buff_head	rxq_pause;
	struct sk_buff_head	txq_pend;

	struct tasklet_struct	bh;
	struct delayed_work	wq;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	/* EVENT_* bits, presumably — confirm against defer_kevent users */
	unsigned long		flags;

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;
	struct timer_list	stat_monitor;	/* periodic stats poll */

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	/* chip identity read from ID_REV, used for per-chip quirks */
	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	/* flow control negotiation state */
	int			fc_autoneg;
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};
431
432 /* define external phy id */
433 #define PHY_LAN8835                     (0x0007C130)
434 #define PHY_KSZ9031RNX                  (0x00221620)
435
436 /* use ethtool to change the level for any given device */
437 static int msg_level = -1;
438 module_param(msg_level, int, 0);
439 MODULE_PARM_DESC(msg_level, "Override default message level");
440
441 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
442 {
443         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
444         int ret;
445
446         if (!buf)
447                 return -ENOMEM;
448
449         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
450                               USB_VENDOR_REQUEST_READ_REGISTER,
451                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
452                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
453         if (likely(ret >= 0)) {
454                 le32_to_cpus(buf);
455                 *data = *buf;
456         } else {
457                 netdev_warn(dev->net,
458                             "Failed to read register index 0x%08x. ret = %d",
459                             index, ret);
460         }
461
462         kfree(buf);
463
464         return ret;
465 }
466
467 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
468 {
469         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
470         int ret;
471
472         if (!buf)
473                 return -ENOMEM;
474
475         *buf = data;
476         cpu_to_le32s(buf);
477
478         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
479                               USB_VENDOR_REQUEST_WRITE_REGISTER,
480                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
481                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
482         if (unlikely(ret < 0)) {
483                 netdev_warn(dev->net,
484                             "Failed to write register index 0x%08x. ret = %d",
485                             index, ret);
486         }
487
488         kfree(buf);
489
490         return ret;
491 }
492
/* Fetch the full hardware statistics block via a vendor control request.
 *
 * The device returns sizeof(struct lan78xx_statstage) bytes of
 * little-endian counters; they are received into a kmalloc'd bounce
 * buffer (control-transfer data must be DMA-capable) and copied into
 * @data one u32 at a time with endian conversion.
 *
 * Returns the usb_control_msg() result (bytes transferred on success,
 * negative errno on failure) or -ENOMEM on allocation failure.
 */
static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      /* NOTE(review): SET timeout on an IN transfer;
			       * USB_CTRL_GET_TIMEOUT would be conventional —
			       * confirm both constants are equal before
			       * changing
			       */
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = %d", ret);
	}

	kfree(stats);

	return ret;
}
531
/* Detect a 32-bit hardware counter wrap: if the freshly read value is
 * below the previously saved one the counter rolled over since the last
 * poll, so bump its rollover count.  NOTE: evaluates its arguments more
 * than once — only use with simple lvalue arguments.
 */
#define check_counter_rollover(struct1, dev_stats, member) {	\
	if (struct1->member < dev_stats.saved.member)		\
		dev_stats.rollover_count.member++;		\
	}
536
/* Compare a fresh hardware statistics snapshot against the last saved
 * one, bumping the per-counter rollover counts for any counter that
 * wrapped, then record @stats as the new saved snapshot.  Caller holds
 * dev->stats.access_lock (see lan78xx_update_stats).
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
590
591 static void lan78xx_update_stats(struct lan78xx_net *dev)
592 {
593         u32 *p, *count, *max;
594         u64 *data;
595         int i;
596         struct lan78xx_statstage lan78xx_stats;
597
598         if (usb_autopm_get_interface(dev->intf) < 0)
599                 return;
600
601         p = (u32 *)&lan78xx_stats;
602         count = (u32 *)&dev->stats.rollover_count;
603         max = (u32 *)&dev->stats.rollover_max;
604         data = (u64 *)&dev->stats.curr_stat;
605
606         mutex_lock(&dev->stats.access_lock);
607
608         if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
609                 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
610
611         for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
612                 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
613
614         mutex_unlock(&dev->stats.access_lock);
615
616         usb_autopm_put_interface(dev->intf);
617 }
618
619 /* Loop until the read is completed with timeout called with phy_mutex held */
620 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
621 {
622         unsigned long start_time = jiffies;
623         u32 val;
624         int ret;
625
626         do {
627                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
628                 if (unlikely(ret < 0))
629                         return -EIO;
630
631                 if (!(val & MII_ACC_MII_BUSY_))
632                         return 0;
633         } while (!time_after(jiffies, start_time + HZ));
634
635         return -EIO;
636 }
637
638 static inline u32 mii_access(int id, int index, int read)
639 {
640         u32 ret;
641
642         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
643         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
644         if (read)
645                 ret |= MII_ACC_MII_READ_;
646         else
647                 ret |= MII_ACC_MII_WRITE_;
648         ret |= MII_ACC_MII_BUSY_;
649
650         return ret;
651 }
652
653 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
654 {
655         unsigned long start_time = jiffies;
656         u32 val;
657         int ret;
658
659         do {
660                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
661                 if (unlikely(ret < 0))
662                         return -EIO;
663
664                 if (!(val & E2P_CMD_EPC_BUSY_) ||
665                     (val & E2P_CMD_EPC_TIMEOUT_))
666                         break;
667                 usleep_range(40, 100);
668         } while (!time_after(jiffies, start_time + HZ));
669
670         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
671                 netdev_warn(dev->net, "EEPROM read operation timeout");
672                 return -EIO;
673         }
674
675         return 0;
676 }
677
678 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
679 {
680         unsigned long start_time = jiffies;
681         u32 val;
682         int ret;
683
684         do {
685                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
686                 if (unlikely(ret < 0))
687                         return -EIO;
688
689                 if (!(val & E2P_CMD_EPC_BUSY_))
690                         return 0;
691
692                 usleep_range(40, 100);
693         } while (!time_after(jiffies, start_time + HZ));
694
695         netdev_warn(dev->net, "EEPROM is busy");
696         return -EIO;
697 }
698
699 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
700                                    u32 length, u8 *data)
701 {
702         u32 val;
703         u32 saved;
704         int i, ret;
705         int retval;
706
707         /* depends on chip, some EEPROM pins are muxed with LED function.
708          * disable & restore LED function to access EEPROM.
709          */
710         ret = lan78xx_read_reg(dev, HW_CFG, &val);
711         saved = val;
712         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
713                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
714                 ret = lan78xx_write_reg(dev, HW_CFG, val);
715         }
716
717         retval = lan78xx_eeprom_confirm_not_busy(dev);
718         if (retval)
719                 return retval;
720
721         for (i = 0; i < length; i++) {
722                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
723                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
724                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
725                 if (unlikely(ret < 0)) {
726                         retval = -EIO;
727                         goto exit;
728                 }
729
730                 retval = lan78xx_wait_eeprom(dev);
731                 if (retval < 0)
732                         goto exit;
733
734                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
735                 if (unlikely(ret < 0)) {
736                         retval = -EIO;
737                         goto exit;
738                 }
739
740                 data[i] = val & 0xFF;
741                 offset++;
742         }
743
744         retval = 0;
745 exit:
746         if (dev->chipid == ID_REV_CHIP_ID_7800_)
747                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
748
749         return retval;
750 }
751
752 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
753                                u32 length, u8 *data)
754 {
755         u8 sig;
756         int ret;
757
758         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
759         if ((ret == 0) && (sig == EEPROM_INDICATOR))
760                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
761         else
762                 ret = -EINVAL;
763
764         return ret;
765 }
766
/* Write @length bytes from @data to the EEPROM starting at @offset.
 *
 * Sequence per the E2P_CMD interface: issue a write/erase-enable (EWEN)
 * command once, then for each byte stage it in E2P_DATA and issue a
 * WRITE command, waiting for completion between steps.
 *
 * On LAN7800 the EEPROM pins are muxed with the LED function, so the
 * LEDs are disabled in HW_CFG for the duration and restored at exit:.
 *
 * NOTE(review): the return value of the initial HW_CFG read is not
 * checked; if that read fails, 'saved' (and hence the value restored at
 * exit) comes from an uninitialized 'val' — worth hardening.
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* restore the LED mux configuration saved on entry */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
833
834 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
835                                 u32 length, u8 *data)
836 {
837         int i;
838         int ret;
839         u32 buf;
840         unsigned long timeout;
841
842         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
843
844         if (buf & OTP_PWR_DN_PWRDN_N_) {
845                 /* clear it and wait to be cleared */
846                 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
847
848                 timeout = jiffies + HZ;
849                 do {
850                         usleep_range(1, 10);
851                         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
852                         if (time_after(jiffies, timeout)) {
853                                 netdev_warn(dev->net,
854                                             "timeout on OTP_PWR_DN");
855                                 return -EIO;
856                         }
857                 } while (buf & OTP_PWR_DN_PWRDN_N_);
858         }
859
860         for (i = 0; i < length; i++) {
861                 ret = lan78xx_write_reg(dev, OTP_ADDR1,
862                                         ((offset + i) >> 8) & OTP_ADDR1_15_11);
863                 ret = lan78xx_write_reg(dev, OTP_ADDR2,
864                                         ((offset + i) & OTP_ADDR2_10_3));
865
866                 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
867                 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
868
869                 timeout = jiffies + HZ;
870                 do {
871                         udelay(1);
872                         ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
873                         if (time_after(jiffies, timeout)) {
874                                 netdev_warn(dev->net,
875                                             "timeout on OTP_STATUS");
876                                 return -EIO;
877                         }
878                 } while (buf & OTP_STATUS_BUSY_);
879
880                 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
881
882                 data[i] = (u8)(buf & 0xFF);
883         }
884
885         return 0;
886 }
887
888 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
889                                  u32 length, u8 *data)
890 {
891         int i;
892         int ret;
893         u32 buf;
894         unsigned long timeout;
895
896         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
897
898         if (buf & OTP_PWR_DN_PWRDN_N_) {
899                 /* clear it and wait to be cleared */
900                 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
901
902                 timeout = jiffies + HZ;
903                 do {
904                         udelay(1);
905                         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
906                         if (time_after(jiffies, timeout)) {
907                                 netdev_warn(dev->net,
908                                             "timeout on OTP_PWR_DN completion");
909                                 return -EIO;
910                         }
911                 } while (buf & OTP_PWR_DN_PWRDN_N_);
912         }
913
914         /* set to BYTE program mode */
915         ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
916
917         for (i = 0; i < length; i++) {
918                 ret = lan78xx_write_reg(dev, OTP_ADDR1,
919                                         ((offset + i) >> 8) & OTP_ADDR1_15_11);
920                 ret = lan78xx_write_reg(dev, OTP_ADDR2,
921                                         ((offset + i) & OTP_ADDR2_10_3));
922                 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
923                 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
924                 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
925
926                 timeout = jiffies + HZ;
927                 do {
928                         udelay(1);
929                         ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
930                         if (time_after(jiffies, timeout)) {
931                                 netdev_warn(dev->net,
932                                             "Timeout on OTP_STATUS completion");
933                                 return -EIO;
934                         }
935                 } while (buf & OTP_STATUS_BUSY_);
936         }
937
938         return 0;
939 }
940
941 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
942                             u32 length, u8 *data)
943 {
944         u8 sig;
945         int ret;
946
947         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
948
949         if (ret == 0) {
950                 if (sig == OTP_INDICATOR_2)
951                         offset += 0x100;
952                 else if (sig != OTP_INDICATOR_1)
953                         ret = -EINVAL;
954                 if (!ret)
955                         ret = lan78xx_read_raw_otp(dev, offset, length, data);
956         }
957
958         return ret;
959 }
960
961 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
962 {
963         int i, ret;
964
965         for (i = 0; i < 100; i++) {
966                 u32 dp_sel;
967
968                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
969                 if (unlikely(ret < 0))
970                         return -EIO;
971
972                 if (dp_sel & DP_SEL_DPRDY_)
973                         return 0;
974
975                 usleep_range(40, 100);
976         }
977
978         netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
979
980         return -EIO;
981 }
982
/* Write @length words from @buf into the internal RAM selected by
 * @ram_select, starting at word address @addr, through the DP_* data
 * port registers.  Each word is handshaken by polling DP_SEL_DPRDY_.
 *
 * Serialized against other data port users by pdata->dataport_mutex;
 * sleeps, so must run in process context.
 *
 * NOTE(review): a failed usb_autopm_get_interface() returns 0 (success)
 * without writing anything — presumably a deliberate best-effort skip
 * while the device cannot be resumed; confirm callers tolerate it.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
			return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	/* select the target RAM, preserving the other DP_SEL bits */
	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		/* wait for the chip to latch this word before the next */
		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1023
1024 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1025                                     int index, u8 addr[ETH_ALEN])
1026 {
1027         u32     temp;
1028
1029         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1030                 temp = addr[3];
1031                 temp = addr[2] | (temp << 8);
1032                 temp = addr[1] | (temp << 8);
1033                 temp = addr[0] | (temp << 8);
1034                 pdata->pfilter_table[index][1] = temp;
1035                 temp = addr[5];
1036                 temp = addr[4] | (temp << 8);
1037                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1038                 pdata->pfilter_table[index][0] = temp;
1039         }
1040 }
1041
1042 /* returns hash bit number for given MAC address */
1043 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1044 {
1045         return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1046 }
1047
/* Workqueue handler: push the RX filter state staged by
 * lan78xx_set_multicast() — the 512-bit multicast hash, the perfect
 * filter table and RFE_CTL — to the chip.  Runs in process context
 * because the register and data port writes sleep.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	/* load the multicast hash table into the VLAN/DA filter RAM */
	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	/* slot 0 holds the device's own address; start at 1.  MAF_HI is
	 * cleared first so the entry is invalid while MAF_LO changes,
	 * then rewritten with the new high word (carries the valid bit).
	 */
	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
1072
/* ndo_set_rx_mode handler.  Rebuilds the RX filter configuration
 * (promiscuous/allmulti, perfect filters, multicast hash) in software
 * under rfe_ctl_lock, then defers the actual register writes to a
 * workqueue — this callback can run in atomic context and the USB
 * register writes sleep.
 */
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	/* start clean: drop unicast/multicast enables and both
	 * filtering mechanisms, then rebuild below
	 */
	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
			pdata->mchash_table[i] = 0;
	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
			pdata->pfilter_table[i][0] =
			pdata->pfilter_table[i][1] = 0;
	}

	/* broadcast is always accepted */
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		/* first 32 addresses use perfect-filter slots 1..32;
		 * any overflow falls back to the 512-bit hash filter
		 */
		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
1135
1136 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1137                                       u16 lcladv, u16 rmtadv)
1138 {
1139         u32 flow = 0, fct_flow = 0;
1140         int ret;
1141         u8 cap;
1142
1143         if (dev->fc_autoneg)
1144                 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1145         else
1146                 cap = dev->fc_request_control;
1147
1148         if (cap & FLOW_CTRL_TX)
1149                 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1150
1151         if (cap & FLOW_CTRL_RX)
1152                 flow |= FLOW_CR_RX_FCEN_;
1153
1154         if (dev->udev->speed == USB_SPEED_SUPER)
1155                 fct_flow = 0x817;
1156         else if (dev->udev->speed == USB_SPEED_HIGH)
1157                 fct_flow = 0x211;
1158
1159         netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1160                   (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1161                   (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1162
1163         ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1164
1165         /* threshold value should be set before enabling flow */
1166         ret = lan78xx_write_reg(dev, FLOW, flow);
1167
1168         return 0;
1169 }
1170
/* Handle a PHY interrupt / link change (called from the deferred-work
 * handler).  On link loss: reset the MAC and stop the stats timer.
 * On link up: tune USB3 U1/U2 low-power states for the negotiated
 * Ethernet speed, resolve flow control from the advertisement
 * registers, restart the stats timer and kick the RX/TX tasklet.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	/* sample link state consistently under the PHY lock */
	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		del_timer(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		/* restart periodic statistics collection */
		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		tasklet_schedule(&dev->bh);
	}

	return ret;
}
1251
/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.      hope the failure is rare.
 */
/* Flag event @work in dev->flags and queue the deferred-work handler.
 * schedule_delayed_work() returns false when the work is already
 * queued; the bit is still set, so presumably the pending run picks the
 * event up — hence the message is only a warning, not an error path.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}
1263
/* Interrupt-endpoint URB completion: parse the 4-byte interrupt status
 * word.  On a PHY interrupt, defer a link reset to the deferred-work
 * handler and chain into the PHY's mapped interrupt, if any.
 */
static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	/* status word arrives little-endian */
	memcpy(&intdata, urb->transfer_buffer, 4);
	le32_to_cpus(&intdata);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0) {
			/* generic_handle_irq() must run with IRQs disabled */
			local_irq_disable();
			generic_handle_irq(dev->domain_data.phyirq);
			local_irq_enable();
		}
	} else
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
}
1290
/* ethtool .get_eeprom_len: report the maximum supported EEPROM size. */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1295
/* ethtool .get_eeprom: read @ee->len bytes at @ee->offset from the
 * external EEPROM while holding a USB autopm reference.
 */
static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1314
1315 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1316                                       struct ethtool_eeprom *ee, u8 *data)
1317 {
1318         struct lan78xx_net *dev = netdev_priv(netdev);
1319         int ret;
1320
1321         ret = usb_autopm_get_interface(dev->intf);
1322         if (ret)
1323                 return ret;
1324
1325         /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1326          * to load data from EEPROM
1327          */
1328         if (ee->magic == LAN78XX_EEPROM_MAGIC)
1329                 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1330         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1331                  (ee->offset == 0) &&
1332                  (ee->len == 512) &&
1333                  (data[0] == OTP_INDICATOR_1))
1334                 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1335
1336         usb_autopm_put_interface(dev->intf);
1337
1338         return ret;
1339 }
1340
1341 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1342                                 u8 *data)
1343 {
1344         if (stringset == ETH_SS_STATS)
1345                 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1346 }
1347
1348 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1349 {
1350         if (sset == ETH_SS_STATS)
1351                 return ARRAY_SIZE(lan78xx_gstrings);
1352         else
1353                 return -EOPNOTSUPP;
1354 }
1355
/* ethtool .get_ethtool_stats: refresh the hardware counters, then copy
 * the cached snapshot out under the stats access lock.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1367
1368 static void lan78xx_get_wol(struct net_device *netdev,
1369                             struct ethtool_wolinfo *wol)
1370 {
1371         struct lan78xx_net *dev = netdev_priv(netdev);
1372         int ret;
1373         u32 buf;
1374         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1375
1376         if (usb_autopm_get_interface(dev->intf) < 0)
1377                         return;
1378
1379         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1380         if (unlikely(ret < 0)) {
1381                 wol->supported = 0;
1382                 wol->wolopts = 0;
1383         } else {
1384                 if (buf & USB_CFG_RMT_WKP_) {
1385                         wol->supported = WAKE_ALL;
1386                         wol->wolopts = pdata->wol;
1387                 } else {
1388                         wol->supported = 0;
1389                         wol->wolopts = 0;
1390                 }
1391         }
1392
1393         usb_autopm_put_interface(dev->intf);
1394 }
1395
1396 static int lan78xx_set_wol(struct net_device *netdev,
1397                            struct ethtool_wolinfo *wol)
1398 {
1399         struct lan78xx_net *dev = netdev_priv(netdev);
1400         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1401         int ret;
1402
1403         ret = usb_autopm_get_interface(dev->intf);
1404         if (ret < 0)
1405                 return ret;
1406
1407         if (wol->wolopts & ~WAKE_ALL)
1408                 return -EINVAL;
1409
1410         pdata->wol = wol->wolopts;
1411
1412         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1413
1414         phy_ethtool_set_wol(netdev->phydev, wol);
1415
1416         usb_autopm_put_interface(dev->intf);
1417
1418         return ret;
1419 }
1420
/* ethtool .get_eee: combine the PHY's EEE report with the MAC's EEE
 * enable bit and TX LPI request delay.  Holds a USB autopm reference
 * for the register reads.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* active only if both ends advertise a common EEE mode */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1458
/* ethtool .set_eee: toggle the MAC's EEE enable bit and, when
 * enabling, program the PHY advertisement and the TX LPI request
 * delay.
 *
 * NOTE(review): register read/write results are ignored and the
 * function always returns 0 — confirm this best-effort behavior is
 * intended.
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		/* EEE_TX_LPI_REQ_DLY and tx_lpi_timer use the same unit */
		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
1488
1489 static u32 lan78xx_get_link(struct net_device *net)
1490 {
1491         u32 link;
1492
1493         mutex_lock(&net->phydev->lock);
1494         phy_read_status(net->phydev);
1495         link = net->phydev->link;
1496         mutex_unlock(&net->phydev->lock);
1497
1498         return link;
1499 }
1500
/* ethtool .get_drvinfo: report driver name and USB bus path.
 * NOTE(review): strncpy() does not guarantee NUL termination, but
 * DRIVER_NAME ("lan78xx") is far shorter than info->driver, so the
 * copy is always terminated here.
 */
static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
1509
1510 static u32 lan78xx_get_msglevel(struct net_device *net)
1511 {
1512         struct lan78xx_net *dev = netdev_priv(net);
1513
1514         return dev->msg_enable;
1515 }
1516
1517 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1518 {
1519         struct lan78xx_net *dev = netdev_priv(net);
1520
1521         dev->msg_enable = level;
1522 }
1523
/* ethtool .get_link_ksettings: defer to phylib while holding a USB
 * autopm reference.
 */
static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	phy_ethtool_ksettings_get(phydev, cmd);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1541
/* ethtool .set_link_ksettings: apply speed/duplex/autoneg via phylib.
 * When autoneg is disabled, the link is bounced by briefly setting
 * BMCR_LOOPBACK and restoring BMCR — per the in-code comment this
 * forces the link down so the forced parameters take effect.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1569
1570 static void lan78xx_get_pause(struct net_device *net,
1571                               struct ethtool_pauseparam *pause)
1572 {
1573         struct lan78xx_net *dev = netdev_priv(net);
1574         struct phy_device *phydev = net->phydev;
1575         struct ethtool_link_ksettings ecmd;
1576
1577         phy_ethtool_ksettings_get(phydev, &ecmd);
1578
1579         pause->autoneg = dev->fc_autoneg;
1580
1581         if (dev->fc_request_control & FLOW_CTRL_TX)
1582                 pause->tx_pause = 1;
1583
1584         if (dev->fc_request_control & FLOW_CTRL_RX)
1585                 pause->rx_pause = 1;
1586 }
1587
/* ethtool .set_pauseparam: record the requested pause configuration
 * and, when link autoneg is on, rewrite the PHY's pause advertisement
 * to match.  Rejects pause autoneg while link autoneg is disabled.
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	/* pause autoneg only makes sense with link autoneg enabled */
	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		u32 mii_adv;
		u32 advertising;

		/* replace the pause bits in the current advertisement */
		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, ecmd.link_modes.advertising);

		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

		ethtool_convert_legacy_u32_to_link_mode(
			ecmd.link_modes.advertising, advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
1633
1634 static int lan78xx_get_regs_len(struct net_device *netdev)
1635 {
1636         if (!netdev->phydev)
1637                 return (sizeof(lan78xx_regs));
1638         else
1639                 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1640 }
1641
1642 static void
1643 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1644                  void *buf)
1645 {
1646         u32 *data = buf;
1647         int i, j;
1648         struct lan78xx_net *dev = netdev_priv(netdev);
1649
1650         /* Read Device/MAC registers */
1651         for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
1652                 lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1653
1654         if (!netdev->phydev)
1655                 return;
1656
1657         /* Read PHY registers */
1658         for (j = 0; j < 32; i++, j++)
1659                 data[i] = phy_read(netdev->phydev, j);
1660 }
1661
/* ethtool operations.  The EEPROM, WoL, EEE and link-settings handlers
 * take a USB autopm reference around register access; the remaining
 * handlers operate on cached driver state or the PHY.
 */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link       = lan78xx_get_link,
	.nway_reset     = phy_ethtool_nway_reset,
	.get_drvinfo    = lan78xx_get_drvinfo,
	.get_msglevel   = lan78xx_get_msglevel,
	.set_msglevel   = lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom     = lan78xx_ethtool_get_eeprom,
	.set_eeprom     = lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings    = lan78xx_get_strings,
	.get_wol        = lan78xx_get_wol,
	.set_wol        = lan78xx_set_wol,
	.get_eee        = lan78xx_get_eee,
	.set_eee        = lan78xx_set_eee,
	.get_pauseparam = lan78xx_get_pause,
	.set_pauseparam = lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len   = lan78xx_get_regs_len,
	.get_regs       = lan78xx_get_regs,
};
1685
1686 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1687 {
1688         if (!netif_running(netdev))
1689                 return -EINVAL;
1690
1691         return phy_mii_ioctl(netdev->phydev, rq, cmd);
1692 }
1693
/* Establish the interface MAC address.
 *
 * Source priority: an address already programmed in RX_ADDRL/RX_ADDRH,
 * then the platform/Device Tree, then EEPROM/OTP, finally a random
 * address.  The chosen address is written back to the MAC address
 * registers (when it wasn't already there), installed in perfect-filter
 * slot 0, and copied into the net_device.
 */
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[6];

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	/* registers hold the address little-endian: ADDRL = bytes 0..3 */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	/* install own address in perfect-filter slot 0 */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}
1743
1744 /* MDIO read and write wrappers for phylib */
1745 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1746 {
1747         struct lan78xx_net *dev = bus->priv;
1748         u32 val, addr;
1749         int ret;
1750
1751         ret = usb_autopm_get_interface(dev->intf);
1752         if (ret < 0)
1753                 return ret;
1754
1755         mutex_lock(&dev->phy_mutex);
1756
1757         /* confirm MII not busy */
1758         ret = lan78xx_phy_wait_not_busy(dev);
1759         if (ret < 0)
1760                 goto done;
1761
1762         /* set the address, index & direction (read from PHY) */
1763         addr = mii_access(phy_id, idx, MII_READ);
1764         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1765
1766         ret = lan78xx_phy_wait_not_busy(dev);
1767         if (ret < 0)
1768                 goto done;
1769
1770         ret = lan78xx_read_reg(dev, MII_DATA, &val);
1771
1772         ret = (int)(val & 0xFFFF);
1773
1774 done:
1775         mutex_unlock(&dev->phy_mutex);
1776         usb_autopm_put_interface(dev->intf);
1777
1778         return ret;
1779 }
1780
1781 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1782                                  u16 regval)
1783 {
1784         struct lan78xx_net *dev = bus->priv;
1785         u32 val, addr;
1786         int ret;
1787
1788         ret = usb_autopm_get_interface(dev->intf);
1789         if (ret < 0)
1790                 return ret;
1791
1792         mutex_lock(&dev->phy_mutex);
1793
1794         /* confirm MII not busy */
1795         ret = lan78xx_phy_wait_not_busy(dev);
1796         if (ret < 0)
1797                 goto done;
1798
1799         val = (u32)regval;
1800         ret = lan78xx_write_reg(dev, MII_DATA, val);
1801
1802         /* set the address, index & direction (write to PHY) */
1803         addr = mii_access(phy_id, idx, MII_WRITE);
1804         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1805
1806         ret = lan78xx_phy_wait_not_busy(dev);
1807         if (ret < 0)
1808                 goto done;
1809
1810 done:
1811         mutex_unlock(&dev->phy_mutex);
1812         usb_autopm_put_interface(dev->intf);
1813         return 0;
1814 }
1815
1816 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1817 {
1818         struct device_node *node;
1819         int ret;
1820
1821         dev->mdiobus = mdiobus_alloc();
1822         if (!dev->mdiobus) {
1823                 netdev_err(dev->net, "can't allocate MDIO bus\n");
1824                 return -ENOMEM;
1825         }
1826
1827         dev->mdiobus->priv = (void *)dev;
1828         dev->mdiobus->read = lan78xx_mdiobus_read;
1829         dev->mdiobus->write = lan78xx_mdiobus_write;
1830         dev->mdiobus->name = "lan78xx-mdiobus";
1831         dev->mdiobus->parent = &dev->udev->dev;
1832
1833         snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1834                  dev->udev->bus->busnum, dev->udev->devnum);
1835
1836         switch (dev->chipid) {
1837         case ID_REV_CHIP_ID_7800_:
1838         case ID_REV_CHIP_ID_7850_:
1839                 /* set to internal PHY id */
1840                 dev->mdiobus->phy_mask = ~(1 << 1);
1841                 break;
1842         case ID_REV_CHIP_ID_7801_:
1843                 /* scan thru PHYAD[2..0] */
1844                 dev->mdiobus->phy_mask = ~(0xFF);
1845                 break;
1846         }
1847
1848         node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
1849         ret = of_mdiobus_register(dev->mdiobus, node);
1850         if (node)
1851                 of_node_put(node);
1852         if (ret) {
1853                 netdev_err(dev->net, "can't register MDIO bus\n");
1854                 goto exit1;
1855         }
1856
1857         netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1858         return 0;
1859 exit1:
1860         mdiobus_free(dev->mdiobus);
1861         return ret;
1862 }
1863
/* Tear down the MDIO bus created by lan78xx_mdio_init() */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1869
/* phylib link-change callback (registered via phy_connect_direct()).
 *
 * Only acts in forced-100 mode: drops BMCR to 10 Mbps and then restores
 * 100 Mbps, with the PHY interrupt masked around the sequence so the
 * intermediate link transitions do not generate events.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	/* NOTE(review): phy_read() can return a negative errno; values are
	 * used unchecked here - presumably acceptable for this best-effort
	 * workaround, but worth confirming.
	 */
	int ret, temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
1901
1902 static int irq_map(struct irq_domain *d, unsigned int irq,
1903                    irq_hw_number_t hwirq)
1904 {
1905         struct irq_domain_data *data = d->host_data;
1906
1907         irq_set_chip_data(irq, data);
1908         irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1909         irq_set_noprobe(irq);
1910
1911         return 0;
1912 }
1913
/* irq_domain .unmap callback: detach handler, chip and chip data from
 * the virq, undoing irq_map().
 */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
1919
/* irq_domain operations for the chip's interrupt-status bits */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
1924
1925 static void lan78xx_irq_mask(struct irq_data *irqd)
1926 {
1927         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1928
1929         data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1930 }
1931
1932 static void lan78xx_irq_unmask(struct irq_data *irqd)
1933 {
1934         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1935
1936         data->irqenable |= BIT(irqd_to_hwirq(irqd));
1937 }
1938
1939 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1940 {
1941         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1942
1943         mutex_lock(&data->irq_lock);
1944 }
1945
1946 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1947 {
1948         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1949         struct lan78xx_net *dev =
1950                         container_of(data, struct lan78xx_net, domain_data);
1951         u32 buf;
1952         int ret;
1953
1954         /* call register access here because irq_bus_lock & irq_bus_sync_unlock
1955          * are only two callbacks executed in non-atomic contex.
1956          */
1957         ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1958         if (buf != data->irqenable)
1959                 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1960
1961         mutex_unlock(&data->irq_lock);
1962 }
1963
/* irqchip for the chip's interrupt-status bits; mask/unmask only touch
 * the cached enable word, and bus_lock/bus_sync_unlock bracket the
 * (sleeping) USB register access.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
1971
/* Create an irq_domain exposing the chip's interrupt-status bits as
 * Linux interrupts, and create the mapping for the PHY bit
 * (INT_EP_PHY), stored in domain_data.phyirq for lan78xx_phy_init().
 * Returns 0 on success, -EINVAL if the domain or mapping could not be
 * created.
 */
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	/* seed the cached enable mask from the current register value
	 * (NOTE(review): read result unchecked - buf may be stale on a
	 * USB error; confirm acceptable)
	 */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}
2010
2011 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2012 {
2013         if (dev->domain_data.phyirq > 0) {
2014                 irq_dispose_mapping(dev->domain_data.phyirq);
2015
2016                 if (dev->domain_data.irqdomain)
2017                         irq_domain_remove(dev->domain_data.irqdomain);
2018         }
2019         dev->domain_data.phyirq = 0;
2020         dev->domain_data.irqdomain = NULL;
2021 }
2022
/* PHY fixup for the LAN8835, registered in lan7801_phy_init().
 *
 * Puts the shared LED2/PME_N/IRQ_N/RGMII_ID pin into IRQ_N mode via
 * vendor register 0x8010 in the PCS MMD, enables the MAC-side RGMII TXC
 * delay, tunes the TX DLL, and records RGMII_TXID as the interface mode.
 * Returns 1 (the fixup ran).
 */
static int lan8835_fixup(struct phy_device *phydev)
{
	int buf;
	int ret;
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
	buf &= ~0x1800;
	buf |= 0x0800;
	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);

	/* RGMII MAC TXC Delay Enable */
	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
				MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);

	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;

	return 1;
}
2046
/* PHY fixup for the Micrel KSZ9031RNX, registered in lan7801_phy_init().
 *
 * Programs the RGMII pad-skew registers (MMD 2, accessed here via the
 * MDIO_MMD_WIS index) and records RGMII_RXID as the interface mode.
 * Returns 1 (the fixup ran).
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
2063
2064 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2065 {
2066         u32 buf;
2067         int ret;
2068         struct fixed_phy_status fphy_status = {
2069                 .link = 1,
2070                 .speed = SPEED_1000,
2071                 .duplex = DUPLEX_FULL,
2072         };
2073         struct phy_device *phydev;
2074
2075         phydev = phy_find_first(dev->mdiobus);
2076         if (!phydev) {
2077                 netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2078                 phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1,
2079                                             NULL);
2080                 if (IS_ERR(phydev)) {
2081                         netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2082                         return NULL;
2083                 }
2084                 netdev_dbg(dev->net, "Registered FIXED PHY\n");
2085                 dev->interface = PHY_INTERFACE_MODE_RGMII;
2086                 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2087                                         MAC_RGMII_ID_TXC_DELAY_EN_);
2088                 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2089                 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2090                 buf |= HW_CFG_CLK125_EN_;
2091                 buf |= HW_CFG_REFCLK25_EN_;
2092                 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2093         } else {
2094                 if (!phydev->drv) {
2095                         netdev_err(dev->net, "no PHY driver found\n");
2096                         return NULL;
2097                 }
2098                 dev->interface = PHY_INTERFACE_MODE_RGMII;
2099                 /* external PHY fixup for KSZ9031RNX */
2100                 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2101                                                  ksz9031rnx_fixup);
2102                 if (ret < 0) {
2103                         netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2104                         return NULL;
2105                 }
2106                 /* external PHY fixup for LAN8835 */
2107                 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2108                                                  lan8835_fixup);
2109                 if (ret < 0) {
2110                         netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2111                         return NULL;
2112                 }
2113                 /* add more external PHY fixup here if needed */
2114
2115                 phydev->is_internal = false;
2116         }
2117         return phydev;
2118 }
2119
/* Find and connect the PHY for the detected chip variant.
 *
 * LAN7801 goes through lan7801_phy_init() (external or fixed PHY);
 * LAN7800/7850 use the internal GMII PHY found on the MDIO bus.  The
 * PHY is then connected with lan78xx_link_status_change() as the
 * link-change callback, advertisement is trimmed (no 1000T half, both
 * pause flags per fc_request_control), optional DT LED modes enable the
 * corresponding HW_CFG LED bits, and autonegotiation is (re)started.
 * Returns 0 on success or -EIO.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	int ret;
	u32 mii_adv;
	struct phy_device *phydev;

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7801_:
		phydev = lan7801_phy_init(dev);
		if (!phydev) {
			netdev_err(dev->net, "lan7801: PHY Init Failed");
			return -EIO;
		}
		break;

	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		phydev = phy_find_first(dev->mdiobus);
		if (!phydev) {
			netdev_err(dev->net, "no PHY found\n");
			return -EIO;
		}
		phydev->is_internal = true;
		dev->interface = PHY_INTERFACE_MODE_GMII;
		break;

	default:
		netdev_err(dev->net, "Unknown CHIP ID found\n");
		return -EIO;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = PHY_POLL;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		/* undo whatever lan7801_phy_init() registered */
		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
			if (phy_is_pseudo_fixed_link(phydev)) {
				fixed_phy_unregister(phydev);
			} else {
				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
							     0xfffffff0);
				phy_unregister_fixup_for_uid(PHY_LAN8835,
							     0xfffffff0);
			}
		}
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phydev->supported &= ~SUPPORTED_1000baseT_Half;

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

	if (phydev->mdio.dev.of_node) {
		u32 reg;
		int len;

		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
						      "microchip,led-modes",
						      sizeof(u32));
		if (len >= 0) {
			/* Ensure the appropriate LEDs are enabled */
			lan78xx_read_reg(dev, HW_CFG, &reg);
			reg &= ~(HW_CFG_LED0_EN_ |
				 HW_CFG_LED1_EN_ |
				 HW_CFG_LED2_EN_ |
				 HW_CFG_LED3_EN_);
			/* (len > N) evaluates to 0 or 1, so each product
			 * enables LED N only if a mode was given for it
			 */
			reg |= (len > 0) * HW_CFG_LED0_EN_ |
				(len > 1) * HW_CFG_LED1_EN_ |
				(len > 2) * HW_CFG_LED2_EN_ |
				(len > 3) * HW_CFG_LED3_EN_;
			lan78xx_write_reg(dev, HW_CFG, reg);
		}
	}

	genphy_config_aneg(phydev);

	dev->fc_autoneg = phydev->autoneg;

	return 0;
}
2217
2218 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2219 {
2220         int ret = 0;
2221         u32 buf;
2222         bool rxenabled;
2223
2224         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2225
2226         rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2227
2228         if (rxenabled) {
2229                 buf &= ~MAC_RX_RXEN_;
2230                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2231         }
2232
2233         /* add 4 to size for FCS */
2234         buf &= ~MAC_RX_MAX_SIZE_MASK_;
2235         buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2236
2237         ret = lan78xx_write_reg(dev, MAC_RX, buf);
2238
2239         if (rxenabled) {
2240                 buf |= MAC_RX_RXEN_;
2241                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2242         }
2243
2244         return 0;
2245 }
2246
/* Unlink every URB queued on @q.
 *
 * Repeatedly scans the queue under q->lock for an entry not yet in
 * unlink_start state, marks it, then drops the lock to call
 * usb_unlink_urb() (which may complete the URB synchronously and remove
 * it from the queue via its completion handler).  Returns the number of
 * URBs for which the unlink was successfully started.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* find the next entry we have not already started unlinking */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2291
2292 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2293 {
2294         struct lan78xx_net *dev = netdev_priv(netdev);
2295         int ll_mtu = new_mtu + netdev->hard_header_len;
2296         int old_hard_mtu = dev->hard_mtu;
2297         int old_rx_urb_size = dev->rx_urb_size;
2298         int ret;
2299
2300         /* no second zero-length packet read wanted after mtu-sized packets */
2301         if ((ll_mtu % dev->maxpacket) == 0)
2302                 return -EDOM;
2303
2304         ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2305
2306         netdev->mtu = new_mtu;
2307
2308         dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2309         if (dev->rx_urb_size == old_hard_mtu) {
2310                 dev->rx_urb_size = dev->hard_mtu;
2311                 if (dev->rx_urb_size > old_rx_urb_size) {
2312                         if (netif_running(dev->net)) {
2313                                 unlink_urbs(dev, &dev->rxq);
2314                                 tasklet_schedule(&dev->bh);
2315                         }
2316                 }
2317         }
2318
2319         return 0;
2320 }
2321
2322 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2323 {
2324         struct lan78xx_net *dev = netdev_priv(netdev);
2325         struct sockaddr *addr = p;
2326         u32 addr_lo, addr_hi;
2327         int ret;
2328
2329         if (netif_running(netdev))
2330                 return -EBUSY;
2331
2332         if (!is_valid_ether_addr(addr->sa_data))
2333                 return -EADDRNOTAVAIL;
2334
2335         ether_addr_copy(netdev->dev_addr, addr->sa_data);
2336
2337         addr_lo = netdev->dev_addr[0] |
2338                   netdev->dev_addr[1] << 8 |
2339                   netdev->dev_addr[2] << 16 |
2340                   netdev->dev_addr[3] << 24;
2341         addr_hi = netdev->dev_addr[4] |
2342                   netdev->dev_addr[5] << 8;
2343
2344         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2345         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2346
2347         /* Added to support MAC address changes */
2348         ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2349         ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2350
2351         return 0;
2352 }
2353
/* ndo_set_features: enable/disable RX checksum offload and VLAN
 * strip/filter bits in the cached RFE_CTL value (under rfe_ctl_lock),
 * then write the cache out to the chip.  Always returns 0; the register
 * write result is discarded.
 */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
	} else {
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;

	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* the (sleeping) USB register write happens outside the spinlock */
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	return 0;
}
2389
2390 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2391 {
2392         struct lan78xx_priv *pdata =
2393                         container_of(param, struct lan78xx_priv, set_vlan);
2394         struct lan78xx_net *dev = pdata->dev;
2395
2396         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2397                                DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2398 }
2399
2400 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2401                                    __be16 proto, u16 vid)
2402 {
2403         struct lan78xx_net *dev = netdev_priv(netdev);
2404         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2405         u16 vid_bit_index;
2406         u16 vid_dword_index;
2407
2408         vid_dword_index = (vid >> 5) & 0x7F;
2409         vid_bit_index = vid & 0x1F;
2410
2411         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2412
2413         /* defer register writes to a sleepable context */
2414         schedule_work(&pdata->set_vlan);
2415
2416         return 0;
2417 }
2418
2419 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2420                                     __be16 proto, u16 vid)
2421 {
2422         struct lan78xx_net *dev = netdev_priv(netdev);
2423         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2424         u16 vid_bit_index;
2425         u16 vid_dword_index;
2426
2427         vid_dword_index = (vid >> 5) & 0x7F;
2428         vid_bit_index = vid & 0x1F;
2429
2430         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2431
2432         /* defer register writes to a sleepable context */
2433         schedule_work(&pdata->set_vlan);
2434
2435         return 0;
2436 }
2437
/* Program the USB LTM (Latency Tolerance Messaging) BELT registers.
 *
 * When LTM is enabled in USB_CFG1, a 24-byte set of register values is
 * loaded from EEPROM (or OTP as fallback) through a length/pointer pair
 * stored at offset 0x3F.  If LTM is disabled, or no valid descriptor is
 * found, the six registers are written as zero.  On a failed raw read
 * the function returns early and leaves the registers unprogrammed.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];	/* temp[0] = byte count, temp[1] = word offset */
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
2476
/* lan78xx_reset - lite-reset the chip and (re)program baseline registers.
 *
 * Resets the MAC/USB side, restores the MAC address, sizes bulk-in bursts
 * and queue depths for the negotiated USB speed, resets the PHY and
 * finally enables the TX/RX paths.  Returns 0 on success, or -EIO when a
 * self-clearing reset bit fails to clear within ~1s.
 *
 * NOTE(review): the register accessors' return codes are assigned to ret
 * but mostly never checked, so a failed read leaves buf stale.  Kept
 * as-is - confirm intent before adding error handling.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;
	u8 sig;

	/* Trigger a "lite" reset and poll for the self-clearing LRST bit */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* Burst cap and queue lengths depend on the negotiated USB speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* Enable multiple ethernet frames per USB transfer (MEF) */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	/* Enable burst cap (BCE) */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* Clear stale interrupt status; disable flow control for now */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* Poll until the PHY reset completes and the device reports ready */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* Enable MAC and FIFO-controller transmit paths */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev,
					      dev->net->mtu + VLAN_ETH_HLEN);

	/* Enable MAC and FIFO-controller receive paths */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2615
2616 static void lan78xx_init_stats(struct lan78xx_net *dev)
2617 {
2618         u32 *p;
2619         int i;
2620
2621         /* initialize for stats update
2622          * some counters are 20bits and some are 32bits
2623          */
2624         p = (u32 *)&dev->stats.rollover_max;
2625         for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2626                 p[i] = 0xFFFFF;
2627
2628         dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2629         dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2630         dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2631         dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2632         dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2633         dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2634         dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2635         dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2636         dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2637         dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2638
2639         set_bit(EVENT_STAT_UPDATE, &dev->flags);
2640 }
2641
2642 static int lan78xx_open(struct net_device *net)
2643 {
2644         struct lan78xx_net *dev = netdev_priv(net);
2645         int ret;
2646
2647         ret = usb_autopm_get_interface(dev->intf);
2648         if (ret < 0)
2649                 goto out;
2650
2651         phy_start(net->phydev);
2652
2653         netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2654
2655         /* for Link Check */
2656         if (dev->urb_intr) {
2657                 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2658                 if (ret < 0) {
2659                         netif_err(dev, ifup, dev->net,
2660                                   "intr submit %d\n", ret);
2661                         goto done;
2662                 }
2663         }
2664
2665         lan78xx_init_stats(dev);
2666
2667         set_bit(EVENT_DEV_OPEN, &dev->flags);
2668
2669         netif_start_queue(net);
2670
2671         dev->link_on = false;
2672
2673         lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2674 done:
2675         usb_autopm_put_interface(dev->intf);
2676
2677 out:
2678         return ret;
2679 }
2680
/* lan78xx_terminate_urbs - unlink in-flight tx/rx URBs and wait briefly
 * for their completions before the interface is torn down.
 *
 * Registers an on-stack waitqueue in dev->wait so completion paths can
 * wake this thread, then sleeps in short UNLINK_TIMEOUT_MS slices.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		/* NOTE(review): with &&, the wait ends as soon as ANY of the
		 * three queues is empty; waiting for all outstanding URBs
		 * would need ||.  Matches the historical usbnet pattern -
		 * confirm intent before changing.
		 */
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
2706
/* lan78xx_stop - .ndo_stop: quiesce the device.
 *
 * Stops the stats timer and the PHY, marks the device closed, waits out /
 * kills all in-flight URBs, cancels deferred work, and drops the autopm
 * reference taken in lan78xx_open().  Always returns 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net		*dev = netdev_priv(net);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	if (net->phydev)
		phy_stop(net->phydev);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* wait for bulk URB completions, then kill the interrupt URB */
	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	/* drop any frames parked while RX was paused */
	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2743
2744 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2745                                        struct sk_buff *skb, gfp_t flags)
2746 {
2747         u32 tx_cmd_a, tx_cmd_b;
2748
2749         if (skb_cow_head(skb, TX_OVERHEAD)) {
2750                 dev_kfree_skb_any(skb);
2751                 return NULL;
2752         }
2753
2754         if (skb_linearize(skb)) {
2755                 dev_kfree_skb_any(skb);
2756                 return NULL;
2757         }
2758
2759         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2760
2761         if (skb->ip_summed == CHECKSUM_PARTIAL)
2762                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2763
2764         tx_cmd_b = 0;
2765         if (skb_is_gso(skb)) {
2766                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2767
2768                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2769
2770                 tx_cmd_a |= TX_CMD_A_LSO_;
2771         }
2772
2773         if (skb_vlan_tag_present(skb)) {
2774                 tx_cmd_a |= TX_CMD_A_IVTG_;
2775                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2776         }
2777
2778         skb_push(skb, 4);
2779         cpu_to_le32s(&tx_cmd_b);
2780         memcpy(skb->data, &tx_cmd_b, 4);
2781
2782         skb_push(skb, 4);
2783         cpu_to_le32s(&tx_cmd_a);
2784         memcpy(skb->data, &tx_cmd_a, 4);
2785
2786         return skb;
2787 }
2788
/* defer_bh - move a completed skb from its active queue to dev->done.
 *
 * Records the new lifecycle state in the skb's control block, unlinks it
 * from @list (txq or rxq) and appends it to the done queue, scheduling
 * the bottom-half tasklet when the done queue transitions to non-empty.
 * Returns the state the skb had before this call.
 *
 * Locking is hand-over-hand: interrupts are disabled once via
 * @list->lock (irqsave) and re-enabled only when dev->done.lock is
 * dropped (irqrestore), so @flags stays valid across both sections.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2811
/* tx_complete - URB completion callback for bulk-out transfers.
 *
 * On success, credits the tx packet/byte statistics recorded at submit
 * time; on error, counts the failure and triggers the matching recovery
 * (halt clearing, or stopping the queue).  Releases the async autopm
 * reference taken in lan78xx_tx_bh() and hands the skb to the bottom
 * half via the done queue.  Runs in interrupt context.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* stalled endpoint: clear the halt from process
			 * context via the deferred kevent worker
			 */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		/* low-level USB errors: stop queueing new transmits */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2850
2851 static void lan78xx_queue_skb(struct sk_buff_head *list,
2852                               struct sk_buff *newsk, enum skb_state state)
2853 {
2854         struct skb_data *entry = (struct skb_data *)newsk->cb;
2855
2856         __skb_queue_tail(list, newsk);
2857         entry->state = state;
2858 }
2859
2860 static netdev_tx_t
2861 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2862 {
2863         struct lan78xx_net *dev = netdev_priv(net);
2864         struct sk_buff *skb2 = NULL;
2865
2866         if (skb) {
2867                 skb_tx_timestamp(skb);
2868                 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2869         }
2870
2871         if (skb2) {
2872                 skb_queue_tail(&dev->txq_pend, skb2);
2873
2874                 /* throttle TX patch at slower than SUPER SPEED USB */
2875                 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2876                     (skb_queue_len(&dev->txq_pend) > 10))
2877                         netif_stop_queue(net);
2878         } else {
2879                 netif_dbg(dev, tx_err, dev->net,
2880                           "lan78xx_tx_prep return NULL\n");
2881                 dev->net->stats.tx_errors++;
2882                 dev->net->stats.tx_dropped++;
2883         }
2884
2885         tasklet_schedule(&dev->bh);
2886
2887         return NETDEV_TX_OK;
2888 }
2889
2890 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2891 {
2892         struct lan78xx_priv *pdata = NULL;
2893         int ret;
2894         int i;
2895
2896         dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2897
2898         pdata = (struct lan78xx_priv *)(dev->data[0]);
2899         if (!pdata) {
2900                 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2901                 return -ENOMEM;
2902         }
2903
2904         pdata->dev = dev;
2905
2906         spin_lock_init(&pdata->rfe_ctl_lock);
2907         mutex_init(&pdata->dataport_mutex);
2908
2909         INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2910
2911         for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2912                 pdata->vlan_table[i] = 0;
2913
2914         INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2915
2916         dev->net->features = 0;
2917
2918         if (DEFAULT_TX_CSUM_ENABLE)
2919                 dev->net->features |= NETIF_F_HW_CSUM;
2920
2921         if (DEFAULT_RX_CSUM_ENABLE)
2922                 dev->net->features |= NETIF_F_RXCSUM;
2923
2924         if (DEFAULT_TSO_CSUM_ENABLE)
2925                 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2926
2927         if (DEFAULT_VLAN_RX_OFFLOAD)
2928                 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
2929
2930         if (DEFAULT_VLAN_FILTER_ENABLE)
2931                 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2932
2933         dev->net->hw_features = dev->net->features;
2934
2935         ret = lan78xx_setup_irq_domain(dev);
2936         if (ret < 0) {
2937                 netdev_warn(dev->net,
2938                             "lan78xx_setup_irq_domain() failed : %d", ret);
2939                 goto out1;
2940         }
2941
2942         dev->net->hard_header_len += TX_OVERHEAD;
2943         dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2944
2945         /* Init all registers */
2946         ret = lan78xx_reset(dev);
2947         if (ret) {
2948                 netdev_warn(dev->net, "Registers INIT FAILED....");
2949                 goto out2;
2950         }
2951
2952         ret = lan78xx_mdio_init(dev);
2953         if (ret) {
2954                 netdev_warn(dev->net, "MDIO INIT FAILED.....");
2955                 goto out2;
2956         }
2957
2958         dev->net->flags |= IFF_MULTICAST;
2959
2960         pdata->wol = WAKE_MAGIC;
2961
2962         return ret;
2963
2964 out2:
2965         lan78xx_remove_irq_domain(dev);
2966
2967 out1:
2968         netdev_warn(dev->net, "Bind routine FAILED");
2969         cancel_work_sync(&pdata->set_multicast);
2970         cancel_work_sync(&pdata->set_vlan);
2971         kfree(pdata);
2972         return ret;
2973 }
2974
2975 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2976 {
2977         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2978
2979         lan78xx_remove_irq_domain(dev);
2980
2981         lan78xx_remove_mdio(dev);
2982
2983         if (pdata) {
2984                 cancel_work_sync(&pdata->set_multicast);
2985                 cancel_work_sync(&pdata->set_vlan);
2986                 netif_dbg(dev, ifdown, dev->net, "free pdata");
2987                 kfree(pdata);
2988                 pdata = NULL;
2989                 dev->data[0] = 0;
2990         }
2991 }
2992
2993 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2994                                     struct sk_buff *skb,
2995                                     u32 rx_cmd_a, u32 rx_cmd_b)
2996 {
2997         /* HW Checksum offload appears to be flawed if used when not stripping
2998          * VLAN headers. Drop back to S/W checksums under these conditions.
2999          */
3000         if (!(dev->net->features & NETIF_F_RXCSUM) ||
3001             unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3002             ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3003              !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3004                 skb->ip_summed = CHECKSUM_NONE;
3005         } else {
3006                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3007                 skb->ip_summed = CHECKSUM_COMPLETE;
3008         }
3009 }
3010
3011 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3012                                     struct sk_buff *skb,
3013                                     u32 rx_cmd_a, u32 rx_cmd_b)
3014 {
3015         if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3016             (rx_cmd_a & RX_CMD_A_FVTG_))
3017                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3018                                        (rx_cmd_b & 0xffff));
3019 }
3020
3021 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3022 {
3023         int             status;
3024
3025         if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
3026                 skb_queue_tail(&dev->rxq_pause, skb);
3027                 return;
3028         }
3029
3030         dev->net->stats.rx_packets++;
3031         dev->net->stats.rx_bytes += skb->len;
3032
3033         skb->protocol = eth_type_trans(skb, dev->net);
3034
3035         netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3036                   skb->len + sizeof(struct ethhdr), skb->protocol);
3037         memset(skb->cb, 0, sizeof(struct skb_data));
3038
3039         if (skb_defer_rx_timestamp(skb))
3040                 return;
3041
3042         status = netif_rx(skb);
3043         if (status != NET_RX_SUCCESS)
3044                 netif_dbg(dev, rx_err, dev->net,
3045                           "netif_rx status %d\n", status);
3046 }
3047
/* lan78xx_rx - parse one bulk-in buffer that may carry several frames.
 *
 * Each frame is prefixed by three little-endian command words (RX_CMD_A,
 * RX_CMD_B, RX_CMD_C) and padded to a 4-byte boundary.  Frames other
 * than the last are cloned out and delivered individually; the final
 * frame is left in @skb for the caller to deliver.  Returns 1 on
 * success, 0 when the buffer is too short or a clone allocation fails.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* pull the three command words off the front */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* receive-error flag set: skip this frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);
				lan78xx_rx_vlan_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			/* not the last frame: clone (data is shared) and
			 * point the clone at just this packet
			 */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3122
3123 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3124 {
3125         if (!lan78xx_rx(dev, skb)) {
3126                 dev->net->stats.rx_errors++;
3127                 goto done;
3128         }
3129
3130         if (skb->len) {
3131                 lan78xx_skb_return(dev, skb);
3132                 return;
3133         }
3134
3135         netif_dbg(dev, rx_err, dev->net, "drop\n");
3136         dev->net->stats.rx_errors++;
3137 done:
3138         skb_queue_tail(&dev->done, skb);
3139 }
3140
3141 static void rx_complete(struct urb *urb);
3142
/* rx_submit - allocate an rx skb and submit @urb as a bulk-in transfer.
 *
 * On every failure path @urb is freed here, so the caller must not touch
 * it after a non-zero return.  Returns 0 on success, -ENOMEM on skb
 * allocation failure, -ENOLINK when the interface is down/unreachable,
 * or the usb_submit_urb() error code.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	/* track the urb through the skb's control block */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* only submit while the device is present, up, awake and not
	 * halted
	 */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: recover from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
3202
/* rx_complete - URB completion callback for bulk-in transfers.
 *
 * Classifies the completion status into an skb state, hands the skb to
 * the bottom half via defer_bh(), and - when the urb is still usable -
 * resubmits it immediately to keep the rx pipeline full.  On fatal
 * errors the urb is attached to the skb (entry->urb) so the bottom half
 * frees both together.  Runs in interrupt context.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* success, but discard runt buffers */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* keep the urb with the skb so the bh frees both */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	/* resubmit the urb unless rx is halted or being unlinked */
	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
3272
/* lan78xx_tx_bh - bottom half: coalesce pending tx frames into one URB.
 *
 * Non-GSO frames (already carrying their command words) are packed
 * back-to-back, each 4-byte aligned, into a single bulk-out buffer up to
 * MAX_SINGLE_PACKET_SIZE; a GSO frame is always sent on its own.  The
 * combined buffer is submitted, or anchored on dev->deferred when the
 * device is asleep so resume can transmit it.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	count = 0;
	length = 0;
	spin_lock_irqsave(&tqp->lock, flags);
	/* first pass: count how many queued skbs fit in one transfer */
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			count = 1;
			length = skb->len - TX_OVERHEAD;
			__skb_unlink(skb, tqp);
			spin_unlock_irqrestore(&tqp->lock, flags);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}
	spin_unlock_irqrestore(&tqp->lock, flags);

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	/* second pass: dequeue and pack the counted skbs, 4-byte aligned */
	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto drop;

	/* stash submit-time bookkeeping for tx_complete() */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		netif_trans_update(dev->net);
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
3400
3401 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3402 {
3403         struct urb *urb;
3404         int i;
3405
3406         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3407                 for (i = 0; i < 10; i++) {
3408                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3409                                 break;
3410                         urb = usb_alloc_urb(0, GFP_ATOMIC);
3411                         if (urb)
3412                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3413                                         return;
3414                 }
3415
3416                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3417                         tasklet_schedule(&dev->bh);
3418         }
3419         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3420                 netif_wake_queue(dev->net);
3421 }
3422
/* Bottom-half tasklet: drain the "done" queue of completed URBs/skbs,
 * then, if the interface is still up, kick pending TX and RX work and
 * rearm the statistics timer at its short interval.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			/* mark for cleanup first, then hand the frame to
			 * rx_process() (presumably consumes or requeues the
			 * skb — defined elsewhere in this file)
			 */
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			/* TX completed: release URB and buffer */
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			/* RX buffer no longer needed: release URB and skb */
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* unexpected state: abort this pass entirely */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
3466
3467 static void lan78xx_delayedwork(struct work_struct *work)
3468 {
3469         int status;
3470         struct lan78xx_net *dev;
3471
3472         dev = container_of(work, struct lan78xx_net, wq.work);
3473
3474         if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3475                 unlink_urbs(dev, &dev->txq);
3476                 status = usb_autopm_get_interface(dev->intf);
3477                 if (status < 0)
3478                         goto fail_pipe;
3479                 status = usb_clear_halt(dev->udev, dev->pipe_out);
3480                 usb_autopm_put_interface(dev->intf);
3481                 if (status < 0 &&
3482                     status != -EPIPE &&
3483                     status != -ESHUTDOWN) {
3484                         if (netif_msg_tx_err(dev))
3485 fail_pipe:
3486                                 netdev_err(dev->net,
3487                                            "can't clear tx halt, status %d\n",
3488                                            status);
3489                 } else {
3490                         clear_bit(EVENT_TX_HALT, &dev->flags);
3491                         if (status != -ESHUTDOWN)
3492                                 netif_wake_queue(dev->net);
3493                 }
3494         }
3495         if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3496                 unlink_urbs(dev, &dev->rxq);
3497                 status = usb_autopm_get_interface(dev->intf);
3498                 if (status < 0)
3499                                 goto fail_halt;
3500                 status = usb_clear_halt(dev->udev, dev->pipe_in);
3501                 usb_autopm_put_interface(dev->intf);
3502                 if (status < 0 &&
3503                     status != -EPIPE &&
3504                     status != -ESHUTDOWN) {
3505                         if (netif_msg_rx_err(dev))
3506 fail_halt:
3507                                 netdev_err(dev->net,
3508                                            "can't clear rx halt, status %d\n",
3509                                            status);
3510                 } else {
3511                         clear_bit(EVENT_RX_HALT, &dev->flags);
3512                         tasklet_schedule(&dev->bh);
3513                 }
3514         }
3515
3516         if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3517                 int ret = 0;
3518
3519                 clear_bit(EVENT_LINK_RESET, &dev->flags);
3520                 status = usb_autopm_get_interface(dev->intf);
3521                 if (status < 0)
3522                         goto skip_reset;
3523                 if (lan78xx_link_reset(dev) < 0) {
3524                         usb_autopm_put_interface(dev->intf);
3525 skip_reset:
3526                         netdev_info(dev->net, "link reset failed (%d)\n",
3527                                     ret);
3528                 } else {
3529                         usb_autopm_put_interface(dev->intf);
3530                 }
3531         }
3532
3533         if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3534                 lan78xx_update_stats(dev);
3535
3536                 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3537
3538                 mod_timer(&dev->stat_monitor,
3539                           jiffies + (STAT_UPDATE_TIMER * dev->delta));
3540
3541                 dev->delta = min((dev->delta * 2), 50);
3542         }
3543 }
3544
3545 static void intr_complete(struct urb *urb)
3546 {
3547         struct lan78xx_net *dev = urb->context;
3548         int status = urb->status;
3549
3550         switch (status) {
3551         /* success */
3552         case 0:
3553                 lan78xx_status(dev, urb);
3554                 break;
3555
3556         /* software-driven interface shutdown */
3557         case -ENOENT:                   /* urb killed */
3558         case -ESHUTDOWN:                /* hardware gone */
3559                 netif_dbg(dev, ifdown, dev->net,
3560                           "intr shutdown, code %d\n", status);
3561                 return;
3562
3563         /* NOTE:  not throttling like RX/TX, since this endpoint
3564          * already polls infrequently
3565          */
3566         default:
3567                 netdev_dbg(dev->net, "intr status %d\n", status);
3568                 break;
3569         }
3570
3571         if (!netif_running(dev->net))
3572                 return;
3573
3574         memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3575         status = usb_submit_urb(urb, GFP_ATOMIC);
3576         if (status != 0)
3577                 netif_err(dev, timer, dev->net,
3578                           "intr resubmit --> %d\n", status);
3579 }
3580
/* USB disconnect handler: unwind everything probe set up.  The order
 * matters: PHY teardown before unregister_netdev(), pending work and
 * deferred URBs cancelled before lan78xx_unbind(), and the netdev/usbdev
 * references dropped last.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net		*dev;
	struct usb_device		*udev;
	struct net_device		*net;
	struct phy_device		*phydev;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);
	net = dev->net;
	phydev = net->phydev;

	/* drop the PHY fixups registered for these PHY IDs */
	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_disconnect(net->phydev);

	/* pseudo fixed-link PHYs are presumably registered by this driver
	 * (see lan78xx_phy_init) and must be unregistered here
	 */
	if (phy_is_pseudo_fixed_link(phydev))
		fixed_phy_unregister(phydev);

	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* discard TX URBs deferred while the device was suspended */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
3619
/* net_device watchdog callback: TX stalled past watchdog_timeo.
 * Cancel all in-flight TX URBs and run the bottom half to recover.
 */
static void lan78xx_tx_timeout(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}
3627
3628 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3629                                                 struct net_device *netdev,
3630                                                 netdev_features_t features)
3631 {
3632         if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3633                 features &= ~NETIF_F_GSO_MASK;
3634
3635         features = vlan_features_check(skb, features);
3636         features = vxlan_features_check(skb, features);
3637
3638         return features;
3639 }
3640
/* net_device callbacks for the LAN78xx interface. */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
3656
/* Statistics timer callback: defer the actual register reads to the
 * kevent worker (lan78xx_delayedwork) via EVENT_STAT_UPDATE.
 */
static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
3663
/* Probe: allocate the netdev, validate the three expected endpoints
 * (bulk-in, bulk-out, interrupt-in), bind to the hardware, set up the
 * interrupt URB, initialize the PHY and register the net device.
 *
 * Error paths unwind in reverse order through the out1..out5 labels.
 */
static int lan78xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
	struct lan78xx_net *dev;
	struct net_device *netdev;
	struct usb_device *udev;
	int ret;
	unsigned maxp;
	unsigned period;
	u8 *buf = NULL;

	udev = interface_to_usbdev(intf);
	udev = usb_get_dev(udev);	/* hold a ref; dropped in disconnect/out1 */

	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		ret = -ENOMEM;
		goto out1;
	}

	/* netdev_printk() needs this */
	SET_NETDEV_DEV(netdev, &intf->dev);

	dev = netdev_priv(netdev);
	dev->udev = udev;
	dev->intf = intf;
	dev->net = netdev;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
					| NETIF_MSG_PROBE | NETIF_MSG_LINK);

	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->done);
	skb_queue_head_init(&dev->rxq_pause);
	skb_queue_head_init(&dev->txq_pend);
	mutex_init(&dev->phy_mutex);

	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
	init_usb_anchor(&dev->deferred);

	netdev->netdev_ops = &lan78xx_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	netdev->ethtool_ops = &lan78xx_ethtool_ops;

	dev->delta = 1;
	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);

	mutex_init(&dev->stats.access_lock);

	/* the device must expose at least bulk-in, bulk-out and intr-in */
	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	ep_intr = &intf->cur_altsetting->endpoint[2];
	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_intr = usb_rcvintpipe(dev->udev,
					usb_endpoint_num(&ep_intr->desc));

	ret = lan78xx_bind(dev, intf);
	if (ret < 0)
		goto out2;
	strcpy(netdev->name, "eth%d");

	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;

	/* MTU range: 68 - 9000 */
	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
	netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);

	/* set up the interrupt URB; a kmalloc failure here is tolerated
	 * and simply leaves dev->urb_intr NULL
	 */
	period = ep_intr->desc.bInterval;
	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
	buf = kmalloc(maxp, GFP_KERNEL);
	if (buf) {
		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
		if (!dev->urb_intr) {
			ret = -ENOMEM;
			kfree(buf);
			goto out3;
		} else {
			usb_fill_int_urb(dev->urb_intr, dev->udev,
					 dev->pipe_intr, buf, maxp,
					 intr_complete, dev, period);
			/* buf is freed together with the URB */
			dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
		}
	}

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);

	/* Reject broken descriptors. */
	if (dev->maxpacket == 0) {
		ret = -ENODEV;
		goto out4;
	}

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto out4;

	ret = register_netdev(netdev);
	if (ret != 0) {
		netif_err(dev, probe, netdev, "couldn't register the device\n");
		goto out5;
	}

	usb_set_intfdata(intf, dev);

	/* NOTE(review): return value ignored; wakeup enable is best-effort */
	ret = device_set_wakeup_enable(&udev->dev, true);

	 /* Default delay of 2sec has more overhead than advantage.
	  * Set to 10sec as default.
	  */
	pm_runtime_set_autosuspend_delay(&udev->dev,
					 DEFAULT_AUTOSUSPEND_DELAY);

	return 0;

out5:
	phy_disconnect(netdev->phydev);
out4:
	usb_free_urb(dev->urb_intr);
out3:
	lan78xx_unbind(dev, intf);
out2:
	free_netdev(netdev);
out1:
	usb_put_dev(udev);

	return ret;
}
3819
3820 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3821 {
3822         const u16 crc16poly = 0x8005;
3823         int i;
3824         u16 bit, crc, msb;
3825         u8 data;
3826
3827         crc = 0xFFFF;
3828         for (i = 0; i < len; i++) {
3829                 data = *buf++;
3830                 for (bit = 0; bit < 8; bit++) {
3831                         msb = crc >> 15;
3832                         crc <<= 1;
3833
3834                         if (msb ^ (u16)(data & 1)) {
3835                                 crc ^= crc16poly;
3836                                 crc |= (u16)0x0001U;
3837                         }
3838                         data >>= 1;
3839                 }
3840         }
3841
3842         return crc;
3843 }
3844
/* Configure the chip's wake-on-LAN sources for system suspend.
 *
 * @dev: driver state
 * @wol: bitmask of WAKE_* flags (PHY/MAGIC/BCAST/MCAST/UCAST/ARP)
 *
 * Disables TX/RX, clears old wake state, programs WUCSR and the
 * wakeup-frame filters for the requested sources, selects the suspend
 * mode in PMT_CTL, then re-enables RX so wake events can be seen.
 *
 * Always returns 0.  NOTE(review): register accessor return codes are
 * assigned to @ret but never checked, matching the rest of this file.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* leading bytes of IPv4/IPv6 multicast MAC addresses and the ARP
	 * ethertype, used to build wakeup-frame CRC16 filters below
	 */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* stop TX and RX while reconfiguring wake sources */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear wake controls and latched wake-source status */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* start with every wakeup-frame filter disabled */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		/* magic-packet wake selects SUS_MODE_3, unlike the others */
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x7: match the first 3 bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3: match the first 2 bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask bits 12-13: the ethertype field */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable RX so the chip can observe wake events */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3987
3988 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3989 {
3990         struct lan78xx_net *dev = usb_get_intfdata(intf);
3991         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3992         u32 buf;
3993         int ret;
3994         int event;
3995
3996         event = message.event;
3997
3998         if (!dev->suspend_count++) {
3999                 spin_lock_irq(&dev->txq.lock);
4000                 /* don't autosuspend while transmitting */
4001                 if ((skb_queue_len(&dev->txq) ||
4002                      skb_queue_len(&dev->txq_pend)) &&
4003                         PMSG_IS_AUTO(message)) {
4004                         spin_unlock_irq(&dev->txq.lock);
4005                         ret = -EBUSY;
4006                         goto out;
4007                 } else {
4008                         set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4009                         spin_unlock_irq(&dev->txq.lock);
4010                 }
4011
4012                 /* stop TX & RX */
4013                 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4014                 buf &= ~MAC_TX_TXEN_;
4015                 ret = lan78xx_write_reg(dev, MAC_TX, buf);
4016                 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4017                 buf &= ~MAC_RX_RXEN_;
4018                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
4019
4020                 /* empty out the rx and queues */
4021                 netif_device_detach(dev->net);
4022                 lan78xx_terminate_urbs(dev);
4023                 usb_kill_urb(dev->urb_intr);
4024
4025                 /* reattach */
4026                 netif_device_attach(dev->net);
4027         }
4028
4029         if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4030                 del_timer(&dev->stat_monitor);
4031
4032                 if (PMSG_IS_AUTO(message)) {
4033                         /* auto suspend (selective suspend) */
4034                         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4035                         buf &= ~MAC_TX_TXEN_;
4036                         ret = lan78xx_write_reg(dev, MAC_TX, buf);
4037                         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4038                         buf &= ~MAC_RX_RXEN_;
4039                         ret = lan78xx_write_reg(dev, MAC_RX, buf);
4040
4041                         ret = lan78xx_write_reg(dev, WUCSR, 0);
4042                         ret = lan78xx_write_reg(dev, WUCSR2, 0);
4043                         ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4044
4045                         /* set goodframe wakeup */
4046                         ret = lan78xx_read_reg(dev, WUCSR, &buf);
4047
4048                         buf |= WUCSR_RFE_WAKE_EN_;
4049                         buf |= WUCSR_STORE_WAKE_;
4050
4051                         ret = lan78xx_write_reg(dev, WUCSR, buf);
4052
4053                         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4054
4055                         buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4056                         buf |= PMT_CTL_RES_CLR_WKP_STS_;
4057
4058                         buf |= PMT_CTL_PHY_WAKE_EN_;
4059                         buf |= PMT_CTL_WOL_EN_;
4060                         buf &= ~PMT_CTL_SUS_MODE_MASK_;
4061                         buf |= PMT_CTL_SUS_MODE_3_;
4062
4063                         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4064
4065                         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4066
4067                         buf |= PMT_CTL_WUPS_MASK_;
4068
4069                         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4070
4071                         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4072                         buf |= MAC_RX_RXEN_;
4073                         ret = lan78xx_write_reg(dev, MAC_RX, buf);
4074                 } else {
4075                         lan78xx_set_suspend(dev, pdata->wol);
4076                 }
4077         }
4078
4079         ret = 0;
4080 out:
4081         return ret;
4082 }
4083
/* USB resume handler: on the final (outermost) resume, restart the
 * interrupt URB, resubmit TX URBs that were anchored on dev->deferred
 * while the device slept, and restart the queue/bottom half.  Then clear
 * all wake controls, acknowledge latched wake events, and re-enable TX.
 *
 * Always returns 0.  NOTE(review): register accessor return codes are
 * ignored, as elsewhere in this file.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	/* restart the stats timer if suspend stopped it */
	if (!timer_pending(&dev->stat_monitor)) {
		dev->delta = 1;
		mod_timer(&dev->stat_monitor,
			  jiffies + STAT_UPDATE_TIMER);
	}

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
				usb_submit_urb(dev->urb_intr, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		/* resubmit TX URBs deferred during suspend */
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				/* drop the autopm ref taken at submit time */
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* disable all wake sources and clear latched wake status */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
4150
/* Resume after the device was reset while suspended: redo the full chip
 * initialization and restart the PHY, then run the normal resume path.
 */
static int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);

	phy_start(dev->net->phydev);

	return lan78xx_resume(intf);
}
4161
/* USB vendor/product IDs this driver binds to. */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{
	/* ATM2-AF USB Gigabit Ethernet Device */
	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
4182
/* USB driver registration, including runtime-PM (autosuspend) support. */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};
4194
/* Generate module init/exit that register/unregister the USB driver. */
module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");