GNU Linux-libre 4.14.290-gnu1
[releases.git] / drivers / net / usb / lan78xx.c
1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <linux/phy.h>
33 #include <net/ip6_checksum.h>
34 #include <net/vxlan.h>
35 #include <linux/interrupt.h>
36 #include <linux/irqdomain.h>
37 #include <linux/irq.h>
38 #include <linux/irqchip/chained_irq.h>
39 #include <linux/microchipphy.h>
40 #include <linux/phy.h>
41 #include <linux/of_net.h>
42 #include "lan78xx.h"
43
44 #define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
45 #define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
46 #define DRIVER_NAME     "lan78xx"
47 #define DRIVER_VERSION  "1.0.6"
48
49 #define TX_TIMEOUT_JIFFIES              (5 * HZ)
50 #define THROTTLE_JIFFIES                (HZ / 8)
51 #define UNLINK_TIMEOUT_MS               3
52
53 #define RX_MAX_QUEUE_MEMORY             (60 * 1518)
54
55 #define SS_USB_PKT_SIZE                 (1024)
56 #define HS_USB_PKT_SIZE                 (512)
57 #define FS_USB_PKT_SIZE                 (64)
58
59 #define MAX_RX_FIFO_SIZE                (12 * 1024)
60 #define MAX_TX_FIFO_SIZE                (12 * 1024)
61 #define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
62 #define DEFAULT_BULK_IN_DELAY           (0x0800)
63 #define MAX_SINGLE_PACKET_SIZE          (9000)
64 #define DEFAULT_TX_CSUM_ENABLE          (true)
65 #define DEFAULT_RX_CSUM_ENABLE          (true)
66 #define DEFAULT_TSO_CSUM_ENABLE         (true)
67 #define DEFAULT_VLAN_FILTER_ENABLE      (true)
68 #define TX_OVERHEAD                     (8)
69 #define RXW_PADDING                     2
70
71 #define LAN78XX_USB_VENDOR_ID           (0x0424)
72 #define LAN7800_USB_PRODUCT_ID          (0x7800)
73 #define LAN7850_USB_PRODUCT_ID          (0x7850)
74 #define LAN7801_USB_PRODUCT_ID          (0x7801)
75 #define LAN78XX_EEPROM_MAGIC            (0x78A5)
76 #define LAN78XX_OTP_MAGIC               (0x78F3)
77 #define AT29M2AF_USB_VENDOR_ID          (0x07C9)
78 #define AT29M2AF_USB_PRODUCT_ID (0x0012)
79
80 #define MII_READ                        1
81 #define MII_WRITE                       0
82
83 #define EEPROM_INDICATOR                (0xA5)
84 #define EEPROM_MAC_OFFSET               (0x01)
85 #define MAX_EEPROM_SIZE                 512
86 #define OTP_INDICATOR_1                 (0xF3)
87 #define OTP_INDICATOR_2                 (0xF7)
88
89 #define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
90                                          WAKE_MCAST | WAKE_BCAST | \
91                                          WAKE_ARP | WAKE_MAGIC)
92
93 /* USB related defines */
94 #define BULK_IN_PIPE                    1
95 #define BULK_OUT_PIPE                   2
96
97 /* default autosuspend delay (mSec)*/
98 #define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
99
100 /* statistic update interval (mSec) */
101 #define STAT_UPDATE_TIMER               (1 * 1000)
102
103 /* defines interrupts from interrupt EP */
104 #define MAX_INT_EP                      (32)
105 #define INT_EP_INTEP                    (31)
106 #define INT_EP_OTP_WR_DONE              (28)
107 #define INT_EP_EEE_TX_LPI_START         (26)
108 #define INT_EP_EEE_TX_LPI_STOP          (25)
109 #define INT_EP_EEE_RX_LPI               (24)
110 #define INT_EP_MAC_RESET_TIMEOUT        (23)
111 #define INT_EP_RDFO                     (22)
112 #define INT_EP_TXE                      (21)
113 #define INT_EP_USB_STATUS               (20)
114 #define INT_EP_TX_DIS                   (19)
115 #define INT_EP_RX_DIS                   (18)
116 #define INT_EP_PHY                      (17)
117 #define INT_EP_DP                       (16)
118 #define INT_EP_MAC_ERR                  (15)
119 #define INT_EP_TDFU                     (14)
120 #define INT_EP_TDFO                     (13)
121 #define INT_EP_UTX                      (12)
122 #define INT_EP_GPIO_11                  (11)
123 #define INT_EP_GPIO_10                  (10)
124 #define INT_EP_GPIO_9                   (9)
125 #define INT_EP_GPIO_8                   (8)
126 #define INT_EP_GPIO_7                   (7)
127 #define INT_EP_GPIO_6                   (6)
128 #define INT_EP_GPIO_5                   (5)
129 #define INT_EP_GPIO_4                   (4)
130 #define INT_EP_GPIO_3                   (3)
131 #define INT_EP_GPIO_2                   (2)
132 #define INT_EP_GPIO_1                   (1)
133 #define INT_EP_GPIO_0                   (0)
134
/* ethtool statistics names; order must match the member order of
 * struct lan78xx_statstage (and lan78xx_statstage64) exactly, since the
 * stats are copied out as a flat array.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
184
/* Raw 32-bit hardware statistics counters, in device readout order.
 * Member order must match lan78xx_gstrings and lan78xx_statstage64 -
 * the driver iterates over these structs as flat u32 arrays.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
234
/* 64-bit accumulated statistics (rollover-corrected totals).
 * Member order must stay identical to struct lan78xx_statstage; the
 * update path walks both structs index-by-index as flat arrays.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
284
285 struct lan78xx_net;
286
/* Driver-private filtering/wakeup state.
 * NOTE(review): presumably reached via lan78xx_net->data[] or
 * ->driver_priv - confirm against the probe path (not in this chunk).
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;	/* back-pointer to owning device */
	u32 rfe_ctl;			/* cached RFE_CTL register value */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN]; /* VLAN filter table */
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast; /* deferred multicast-filter update */
	struct work_struct set_vlan;	  /* deferred VLAN-filter update */
	u32 wol;			/* configured wake-on-LAN flags */
};
299
/* Lifecycle state of an skb queued for USB transfer (see struct
 * skb_data, stored in skb->cb).
 */
enum skb_state {
	illegal = 0,	/* not yet assigned a valid state */
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
309
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB carrying this skb */
	struct lan78xx_net *dev;
	enum skb_state state;	/* current position in the tx/rx lifecycle */
	size_t length;		/* transfer length in bytes */
	int num_of_packet;	/* number of packets carried by this skb */
};
317
/* Context passed along with asynchronous USB control requests. */
struct usb_context {
	struct usb_ctrlrequest req;	/* the control request being issued */
	struct lan78xx_net *dev;	/* device the request belongs to */
};
322
323 #define EVENT_TX_HALT                   0
324 #define EVENT_RX_HALT                   1
325 #define EVENT_RX_MEMORY                 2
326 #define EVENT_STS_SPLIT                 3
327 #define EVENT_LINK_RESET                4
328 #define EVENT_RX_PAUSED                 5
329 #define EVENT_DEV_WAKING                6
330 #define EVENT_DEV_ASLEEP                7
331 #define EVENT_DEV_OPEN                  8
332 #define EVENT_STAT_UPDATE               9
333
/* Aggregated statistics state; all members protected by access_lock. */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;		/* last raw readout */
	struct lan78xx_statstage	rollover_count;	/* wrap count per counter */
	struct lan78xx_statstage	rollover_max;	/* max value per counter */
	struct lan78xx_statstage64	curr_stat;	/* 64-bit running totals */
};
341
/* State for the chained interrupt domain that demuxes the chip's
 * interrupt-endpoint sources (e.g. the PHY interrupt).
 */
struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;		/* mapped virq for the PHY */
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;	/* currently enabled source mask */
	struct mutex		irq_lock;		/* for irq bus access */
};
350
/* Per-adapter driver state, tying together the netdev, the USB device
 * and the transfer queues.
 */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;

	int			rx_qlen;	/* rx urb queue depth */
	int			tx_qlen;	/* tx urb queue depth */
	struct sk_buff_head	rxq;
	struct sk_buff_head	txq;
	struct sk_buff_head	done;
	struct sk_buff_head	rxq_pause;
	struct sk_buff_head	txq_pend;

	struct tasklet_struct	bh;		/* bottom-half processing */
	struct delayed_work	wq;		/* deferred event work */

	int			msg_enable;

	struct urb		*urb_intr;	/* interrupt endpoint urb */
	struct usb_anchor	deferred;

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;		/* EVENT_* bits */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;
	struct timer_list	stat_monitor;	/* periodic stats refresh */

	unsigned long		data[5];	/* opaque per-driver slots */

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;	/* flow-control autonegotiation */
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};
406
407 /* define external phy id */
408 #define PHY_LAN8835                     (0x0007C130)
409 #define PHY_KSZ9031RNX                  (0x00221620)
410
411 /* use ethtool to change the level for any given device */
412 static int msg_level = -1;
413 module_param(msg_level, int, 0);
414 MODULE_PARM_DESC(msg_level, "Override default message level");
415
416 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
417 {
418         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
419         int ret;
420
421         if (!buf)
422                 return -ENOMEM;
423
424         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
425                               USB_VENDOR_REQUEST_READ_REGISTER,
426                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
427                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
428         if (likely(ret >= 0)) {
429                 le32_to_cpus(buf);
430                 *data = *buf;
431         } else {
432                 netdev_warn(dev->net,
433                             "Failed to read register index 0x%08x. ret = %d",
434                             index, ret);
435         }
436
437         kfree(buf);
438
439         return ret;
440 }
441
442 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
443 {
444         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
445         int ret;
446
447         if (!buf)
448                 return -ENOMEM;
449
450         *buf = data;
451         cpu_to_le32s(buf);
452
453         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
454                               USB_VENDOR_REQUEST_WRITE_REGISTER,
455                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
456                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
457         if (unlikely(ret < 0)) {
458                 netdev_warn(dev->net,
459                             "Failed to write register index 0x%08x. ret = %d",
460                             index, ret);
461         }
462
463         kfree(buf);
464
465         return ret;
466 }
467
468 static int lan78xx_read_stats(struct lan78xx_net *dev,
469                               struct lan78xx_statstage *data)
470 {
471         int ret = 0;
472         int i;
473         struct lan78xx_statstage *stats;
474         u32 *src;
475         u32 *dst;
476
477         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
478         if (!stats)
479                 return -ENOMEM;
480
481         ret = usb_control_msg(dev->udev,
482                               usb_rcvctrlpipe(dev->udev, 0),
483                               USB_VENDOR_REQUEST_GET_STATS,
484                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
485                               0,
486                               0,
487                               (void *)stats,
488                               sizeof(*stats),
489                               USB_CTRL_SET_TIMEOUT);
490         if (likely(ret >= 0)) {
491                 src = (u32 *)stats;
492                 dst = (u32 *)data;
493                 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
494                         le32_to_cpus(&src[i]);
495                         dst[i] = src[i];
496                 }
497         } else {
498                 netdev_warn(dev->net,
499                             "Failed to read stat ret = %d", ret);
500         }
501
502         kfree(stats);
503
504         return ret;
505 }
506
/* Bump the per-counter rollover count when the freshly read value is
 * lower than the previously saved one, i.e. the 32-bit hardware counter
 * wrapped since the last readout.  Implemented as a macro so the same
 * 'member' name can index both structures; arguments are expanded
 * textually (each member is evaluated once here, so no multiple-
 * evaluation hazard in practice).
 */
#define check_counter_rollover(struct1, dev_stats, member) {	\
	if (struct1->member < dev_stats.saved.member)		\
		dev_stats.rollover_count.member++;		\
	}
511
/* Compare a fresh statistics readout against the last saved snapshot,
 * bumping dev->stats.rollover_count for every 32-bit counter that
 * wrapped, then save *stats as the new snapshot.  One line per member
 * of struct lan78xx_statstage.  Caller is expected to hold
 * dev->stats.access_lock (the update path takes it before calling in).
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	/* remember this readout as the baseline for the next comparison */
	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
565
/* Refresh dev->stats.curr_stat from the hardware counters.
 *
 * The statstage structs are walked as flat u32/u64 arrays; this relies
 * on lan78xx_statstage and lan78xx_statstage64 having identical member
 * order.  Each 64-bit total is reconstructed as
 * raw + rollovers * (rollover_max + 1).
 *
 * NOTE(review): rollover_max is read here but populated elsewhere in
 * the driver (not in this chunk) - verify it is initialized before the
 * stat timer first fires.
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	/* silently skip the update if the device cannot be resumed */
	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
593
594 /* Loop until the read is completed with timeout called with phy_mutex held */
595 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
596 {
597         unsigned long start_time = jiffies;
598         u32 val;
599         int ret;
600
601         do {
602                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
603                 if (unlikely(ret < 0))
604                         return -EIO;
605
606                 if (!(val & MII_ACC_MII_BUSY_))
607                         return 0;
608         } while (!time_after(jiffies, start_time + HZ));
609
610         return -EIO;
611 }
612
613 static inline u32 mii_access(int id, int index, int read)
614 {
615         u32 ret;
616
617         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
618         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
619         if (read)
620                 ret |= MII_ACC_MII_READ_;
621         else
622                 ret |= MII_ACC_MII_WRITE_;
623         ret |= MII_ACC_MII_BUSY_;
624
625         return ret;
626 }
627
628 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
629 {
630         unsigned long start_time = jiffies;
631         u32 val;
632         int ret;
633
634         do {
635                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
636                 if (unlikely(ret < 0))
637                         return -EIO;
638
639                 if (!(val & E2P_CMD_EPC_BUSY_) ||
640                     (val & E2P_CMD_EPC_TIMEOUT_))
641                         break;
642                 usleep_range(40, 100);
643         } while (!time_after(jiffies, start_time + HZ));
644
645         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
646                 netdev_warn(dev->net, "EEPROM read operation timeout");
647                 return -EIO;
648         }
649
650         return 0;
651 }
652
653 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
654 {
655         unsigned long start_time = jiffies;
656         u32 val;
657         int ret;
658
659         do {
660                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
661                 if (unlikely(ret < 0))
662                         return -EIO;
663
664                 if (!(val & E2P_CMD_EPC_BUSY_))
665                         return 0;
666
667                 usleep_range(40, 100);
668         } while (!time_after(jiffies, start_time + HZ));
669
670         netdev_warn(dev->net, "EEPROM is busy");
671         return -EIO;
672 }
673
674 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
675                                    u32 length, u8 *data)
676 {
677         u32 val;
678         u32 saved;
679         int i, ret;
680         int retval;
681
682         /* depends on chip, some EEPROM pins are muxed with LED function.
683          * disable & restore LED function to access EEPROM.
684          */
685         ret = lan78xx_read_reg(dev, HW_CFG, &val);
686         saved = val;
687         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
688                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
689                 ret = lan78xx_write_reg(dev, HW_CFG, val);
690         }
691
692         retval = lan78xx_eeprom_confirm_not_busy(dev);
693         if (retval)
694                 return retval;
695
696         for (i = 0; i < length; i++) {
697                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
698                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
699                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
700                 if (unlikely(ret < 0)) {
701                         retval = -EIO;
702                         goto exit;
703                 }
704
705                 retval = lan78xx_wait_eeprom(dev);
706                 if (retval < 0)
707                         goto exit;
708
709                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
710                 if (unlikely(ret < 0)) {
711                         retval = -EIO;
712                         goto exit;
713                 }
714
715                 data[i] = val & 0xFF;
716                 offset++;
717         }
718
719         retval = 0;
720 exit:
721         if (dev->chipid == ID_REV_CHIP_ID_7800_)
722                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
723
724         return retval;
725 }
726
727 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
728                                u32 length, u8 *data)
729 {
730         u8 sig;
731         int ret;
732
733         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
734         if ((ret == 0) && (sig == EEPROM_INDICATOR))
735                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
736         else
737                 ret = -EINVAL;
738
739         return ret;
740 }
741
/* Write 'length' bytes to the EEPROM starting at 'offset'.
 *
 * Sequence: disable muxed LED function (LAN7800 only), wait for the
 * controller to go idle, issue a write/erase-enable (EWEN) command,
 * then for each byte load E2P_DATA and issue a WRITE command, waiting
 * for completion after every command.  The saved HW_CFG value is
 * restored on every exit path.
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* restore LED configuration */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
808
/* Read 'length' bytes from on-chip OTP memory starting at 'offset'.
 *
 * If the OTP block is powered down, it is powered up first and the
 * function waits (up to one second) for the power-down bit to clear.
 * Each byte is read by programming the split address registers,
 * issuing a READ command, and polling OTP_STATUS until not busy.
 *
 * Returns 0 on success, -EIO on a poll timeout.  Individual register
 * access errors are not checked here ('ret' is overwritten each call).
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* byte address is split across two registers: high bits in
		 * OTP_ADDR1, low bits in OTP_ADDR2
		 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
862
/* Program @length bytes from @data into raw OTP memory starting at byte
 * @offset.  The OTP block is woken from power-down if needed and switched
 * to BYTE program mode.  Returns 0 on success or -EIO when a power-up or
 * per-byte program/verify poll does not finish within ~1 second.
 * OTP is one-time programmable: bits can only be set, never cleared.
 * NOTE(review): intermediate register-access errors in 'ret' are ignored,
 * mirroring lan78xx_read_raw_otp(); only poll timeouts are reported.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* byte address split across OTP_ADDR1 (high) / OTP_ADDR2 (low) */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		/* load the data byte, request program-and-verify, then go */
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
915
916 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
917                             u32 length, u8 *data)
918 {
919         u8 sig;
920         int ret;
921
922         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
923
924         if (ret == 0) {
925                 if (sig == OTP_INDICATOR_2)
926                         offset += 0x100;
927                 else if (sig != OTP_INDICATOR_1)
928                         ret = -EINVAL;
929                 if (!ret)
930                         ret = lan78xx_read_raw_otp(dev, offset, length, data);
931         }
932
933         return ret;
934 }
935
936 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
937 {
938         int i, ret;
939
940         for (i = 0; i < 100; i++) {
941                 u32 dp_sel;
942
943                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
944                 if (unlikely(ret < 0))
945                         return -EIO;
946
947                 if (dp_sel & DP_SEL_DPRDY_)
948                         return 0;
949
950                 usleep_range(40, 100);
951         }
952
953         netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
954
955         return -EIO;
956 }
957
/* Write @length 32-bit words from @buf into the internal RAM selected by
 * @ram_select (e.g. the VLAN/DA hash table), starting at word address
 * @addr.  Access to the shared data port is serialized with
 * pdata->dataport_mutex and each word write is confirmed by polling for
 * port-ready before the next one.
 * NOTE(review): when usb_autopm_get_interface() fails this returns 0
 * (success) without writing anything, so callers cannot detect that
 * case — confirm whether an error return would be safer here.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
			return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* select the target RAM, preserving the other DP_SEL bits */
	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		/* wait for this word to land before queueing the next */
		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
998
999 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1000                                     int index, u8 addr[ETH_ALEN])
1001 {
1002         u32     temp;
1003
1004         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1005                 temp = addr[3];
1006                 temp = addr[2] | (temp << 8);
1007                 temp = addr[1] | (temp << 8);
1008                 temp = addr[0] | (temp << 8);
1009                 pdata->pfilter_table[index][1] = temp;
1010                 temp = addr[5];
1011                 temp = addr[4] | (temp << 8);
1012                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1013                 pdata->pfilter_table[index][0] = temp;
1014         }
1015 }
1016
1017 /* returns hash bit number for given MAC address */
1018 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1019 {
1020         return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1021 }
1022
1023 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1024 {
1025         struct lan78xx_priv *pdata =
1026                         container_of(param, struct lan78xx_priv, set_multicast);
1027         struct lan78xx_net *dev = pdata->dev;
1028         int i;
1029         int ret;
1030
1031         netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1032                   pdata->rfe_ctl);
1033
1034         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1035                                DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1036
1037         for (i = 1; i < NUM_OF_MAF; i++) {
1038                 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1039                 ret = lan78xx_write_reg(dev, MAF_LO(i),
1040                                         pdata->pfilter_table[i][1]);
1041                 ret = lan78xx_write_reg(dev, MAF_HI(i),
1042                                         pdata->pfilter_table[i][0]);
1043         }
1044
1045         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1046 }
1047
1048 static void lan78xx_set_multicast(struct net_device *netdev)
1049 {
1050         struct lan78xx_net *dev = netdev_priv(netdev);
1051         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1052         unsigned long flags;
1053         int i;
1054
1055         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1056
1057         pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1058                             RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1059
1060         for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1061                         pdata->mchash_table[i] = 0;
1062         /* pfilter_table[0] has own HW address */
1063         for (i = 1; i < NUM_OF_MAF; i++) {
1064                         pdata->pfilter_table[i][0] =
1065                         pdata->pfilter_table[i][1] = 0;
1066         }
1067
1068         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1069
1070         if (dev->net->flags & IFF_PROMISC) {
1071                 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1072                 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1073         } else {
1074                 if (dev->net->flags & IFF_ALLMULTI) {
1075                         netif_dbg(dev, drv, dev->net,
1076                                   "receive all multicast enabled");
1077                         pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1078                 }
1079         }
1080
1081         if (netdev_mc_count(dev->net)) {
1082                 struct netdev_hw_addr *ha;
1083                 int i;
1084
1085                 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1086
1087                 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1088
1089                 i = 1;
1090                 netdev_for_each_mc_addr(ha, netdev) {
1091                         /* set first 32 into Perfect Filter */
1092                         if (i < 33) {
1093                                 lan78xx_set_addr_filter(pdata, i, ha->addr);
1094                         } else {
1095                                 u32 bitnum = lan78xx_hash(ha->addr);
1096
1097                                 pdata->mchash_table[bitnum / 32] |=
1098                                                         (1 << (bitnum % 32));
1099                                 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1100                         }
1101                         i++;
1102                 }
1103         }
1104
1105         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1106
1107         /* defer register writes to a sleepable context */
1108         schedule_work(&pdata->set_multicast);
1109 }
1110
/* Program MAC flow control after link-up.  The pause capability is either
 * resolved from the local/partner autoneg advertisements (@lcladv,
 * @rmtadv) or taken from the user-forced dev->fc_request_control.
 * @duplex is currently unused.  Always returns 0.
 * NOTE(review): 0x817 / 0x211 appear to be FIFO high/low pause thresholds
 * tuned per USB link speed — confirm against the LAN78xx datasheet.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	/* low 16 bits of FLOW hold the pause-time value (max 0xFFFF) */
	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
1145
/* Handle a PHY link-state change (scheduled from the interrupt URB via
 * EVENT_LINK_RESET).  On link-down the MAC is reset and the statistics
 * timer stopped; on link-up USB power-management (U1/U2) is tuned for the
 * negotiated speed, flow control is reprogrammed, and the stats timer and
 * RX/TX tasklet are (re)started.  Returns 0/positive on success or a
 * negative errno.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	/* snapshot the link state under the PHY lock */
	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		/* no link: stop periodic statistics collection */
		del_timer(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		/* read negotiated pause advertisements for flow control */
		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		/* kick the bottom half to resume RX/TX processing */
		tasklet_schedule(&dev->bh);
	}

	return ret;
}
1226
1227 /* some work can't be done in tasklets, so we use keventd
1228  *
1229  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
1230  * but tasklet_schedule() doesn't.      hope the failure is rare.
1231  */
1232 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1233 {
1234         set_bit(work, &dev->flags);
1235         if (!schedule_delayed_work(&dev->wq, 0))
1236                 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1237 }
1238
1239 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1240 {
1241         u32 intdata;
1242
1243         if (urb->actual_length != 4) {
1244                 netdev_warn(dev->net,
1245                             "unexpected urb length %d", urb->actual_length);
1246                 return;
1247         }
1248
1249         memcpy(&intdata, urb->transfer_buffer, 4);
1250         le32_to_cpus(&intdata);
1251
1252         if (intdata & INT_ENP_PHY_INT) {
1253                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1254                 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1255
1256                 if (dev->domain_data.phyirq > 0)
1257                         generic_handle_irq(dev->domain_data.phyirq);
1258         } else
1259                 netdev_warn(dev->net,
1260                             "unexpected interrupt: 0x%08x\n", intdata);
1261 }
1262
/* ethtool get_eeprom_len: the EEPROM size reported to userspace is fixed. */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1267
1268 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1269                                       struct ethtool_eeprom *ee, u8 *data)
1270 {
1271         struct lan78xx_net *dev = netdev_priv(netdev);
1272         int ret;
1273
1274         ret = usb_autopm_get_interface(dev->intf);
1275         if (ret)
1276                 return ret;
1277
1278         ee->magic = LAN78XX_EEPROM_MAGIC;
1279
1280         ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1281
1282         usb_autopm_put_interface(dev->intf);
1283
1284         return ret;
1285 }
1286
1287 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1288                                       struct ethtool_eeprom *ee, u8 *data)
1289 {
1290         struct lan78xx_net *dev = netdev_priv(netdev);
1291         int ret;
1292
1293         ret = usb_autopm_get_interface(dev->intf);
1294         if (ret)
1295                 return ret;
1296
1297         /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1298          * to load data from EEPROM
1299          */
1300         if (ee->magic == LAN78XX_EEPROM_MAGIC)
1301                 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1302         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1303                  (ee->offset == 0) &&
1304                  (ee->len == 512) &&
1305                  (data[0] == OTP_INDICATOR_1))
1306                 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1307
1308         usb_autopm_put_interface(dev->intf);
1309
1310         return ret;
1311 }
1312
1313 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1314                                 u8 *data)
1315 {
1316         if (stringset == ETH_SS_STATS)
1317                 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1318 }
1319
1320 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1321 {
1322         if (sset == ETH_SS_STATS)
1323                 return ARRAY_SIZE(lan78xx_gstrings);
1324         else
1325                 return -EOPNOTSUPP;
1326 }
1327
/* ethtool get_ethtool_stats: refresh the hardware counters, then copy the
 * cached snapshot to userspace under the stats lock.
 * NOTE(review): curr_stat presumably lays out u64 counters in the same
 * order as lan78xx_gstrings — verify if either side changes.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1339
1340 static void lan78xx_get_wol(struct net_device *netdev,
1341                             struct ethtool_wolinfo *wol)
1342 {
1343         struct lan78xx_net *dev = netdev_priv(netdev);
1344         int ret;
1345         u32 buf;
1346         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1347
1348         if (usb_autopm_get_interface(dev->intf) < 0)
1349                         return;
1350
1351         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1352         if (unlikely(ret < 0)) {
1353                 wol->supported = 0;
1354                 wol->wolopts = 0;
1355         } else {
1356                 if (buf & USB_CFG_RMT_WKP_) {
1357                         wol->supported = WAKE_ALL;
1358                         wol->wolopts = pdata->wol;
1359                 } else {
1360                         wol->supported = 0;
1361                         wol->wolopts = 0;
1362                 }
1363         }
1364
1365         usb_autopm_put_interface(dev->intf);
1366 }
1367
1368 static int lan78xx_set_wol(struct net_device *netdev,
1369                            struct ethtool_wolinfo *wol)
1370 {
1371         struct lan78xx_net *dev = netdev_priv(netdev);
1372         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1373         int ret;
1374
1375         ret = usb_autopm_get_interface(dev->intf);
1376         if (ret < 0)
1377                 return ret;
1378
1379         if (wol->wolopts & ~WAKE_ALL)
1380                 return -EINVAL;
1381
1382         pdata->wol = wol->wolopts;
1383
1384         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1385
1386         phy_ethtool_set_wol(netdev->phydev, wol);
1387
1388         usb_autopm_put_interface(dev->intf);
1389
1390         return ret;
1391 }
1392
/* ethtool get_eee: combine the PHY's EEE report with the MAC's EEE-enable
 * bit and LPI request delay.  Returns 0 or a negative errno.
 * NOTE(review): the lan78xx_read_reg() results are not error-checked
 * before 'buf' is used.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* fills advertised / lp_advertised from the PHY registers */
	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* EEE is active only if both ends advertise a common mode */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1430
/* ethtool set_eee: enable/disable Energy-Efficient Ethernet in the MAC
 * and, when enabling, push the advertisement to the PHY and program the
 * TX LPI request delay (microseconds).  Always returns 0 on this path.
 * NOTE(review): errors from the register accesses and from
 * phy_ethtool_set_eee() are silently dropped.
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		/* EEE_TX_LPI_REQ_DLY shares the tx_lpi_timer uSec unit */
		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
1460
1461 static u32 lan78xx_get_link(struct net_device *net)
1462 {
1463         u32 link;
1464
1465         mutex_lock(&net->phydev->lock);
1466         phy_read_status(net->phydev);
1467         link = net->phydev->link;
1468         mutex_unlock(&net->phydev->lock);
1469
1470         return link;
1471 }
1472
1473 static void lan78xx_get_drvinfo(struct net_device *net,
1474                                 struct ethtool_drvinfo *info)
1475 {
1476         struct lan78xx_net *dev = netdev_priv(net);
1477
1478         strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1479         strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1480         usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1481 }
1482
/* ethtool get_msglevel: return the netif_msg debug bitmask. */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1489
/* ethtool set_msglevel: set the netif_msg debug bitmask. */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1496
1497 static int lan78xx_get_link_ksettings(struct net_device *net,
1498                                       struct ethtool_link_ksettings *cmd)
1499 {
1500         struct lan78xx_net *dev = netdev_priv(net);
1501         struct phy_device *phydev = net->phydev;
1502         int ret;
1503
1504         ret = usb_autopm_get_interface(dev->intf);
1505         if (ret < 0)
1506                 return ret;
1507
1508         phy_ethtool_ksettings_get(phydev, cmd);
1509
1510         usb_autopm_put_interface(dev->intf);
1511
1512         return ret;
1513 }
1514
/* ethtool set_link_ksettings: apply speed/duplex/autoneg via phylib.
 * When autoneg is off, briefly toggle loopback in BMCR to force the link
 * down so the partner renegotiates to the newly forced mode.
 * NOTE(review): the phy_read()/phy_write() results in the forced-mode
 * path are not error-checked.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1542
1543 static void lan78xx_get_pause(struct net_device *net,
1544                               struct ethtool_pauseparam *pause)
1545 {
1546         struct lan78xx_net *dev = netdev_priv(net);
1547         struct phy_device *phydev = net->phydev;
1548         struct ethtool_link_ksettings ecmd;
1549
1550         phy_ethtool_ksettings_get(phydev, &ecmd);
1551
1552         pause->autoneg = dev->fc_autoneg;
1553
1554         if (dev->fc_request_control & FLOW_CTRL_TX)
1555                 pause->tx_pause = 1;
1556
1557         if (dev->fc_request_control & FLOW_CTRL_RX)
1558                 pause->rx_pause = 1;
1559 }
1560
1561 static int lan78xx_set_pause(struct net_device *net,
1562                              struct ethtool_pauseparam *pause)
1563 {
1564         struct lan78xx_net *dev = netdev_priv(net);
1565         struct phy_device *phydev = net->phydev;
1566         struct ethtool_link_ksettings ecmd;
1567         int ret;
1568
1569         phy_ethtool_ksettings_get(phydev, &ecmd);
1570
1571         if (pause->autoneg && !ecmd.base.autoneg) {
1572                 ret = -EINVAL;
1573                 goto exit;
1574         }
1575
1576         dev->fc_request_control = 0;
1577         if (pause->rx_pause)
1578                 dev->fc_request_control |= FLOW_CTRL_RX;
1579
1580         if (pause->tx_pause)
1581                 dev->fc_request_control |= FLOW_CTRL_TX;
1582
1583         if (ecmd.base.autoneg) {
1584                 u32 mii_adv;
1585                 u32 advertising;
1586
1587                 ethtool_convert_link_mode_to_legacy_u32(
1588                         &advertising, ecmd.link_modes.advertising);
1589
1590                 advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1591                 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1592                 advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1593
1594                 ethtool_convert_legacy_u32_to_link_mode(
1595                         ecmd.link_modes.advertising, advertising);
1596
1597                 phy_ethtool_ksettings_set(phydev, &ecmd);
1598         }
1599
1600         dev->fc_autoneg = pause->autoneg;
1601
1602         ret = 0;
1603 exit:
1604         return ret;
1605 }
1606
/* ethtool operations table; registered on the net_device at probe time */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link       = lan78xx_get_link,
	.nway_reset     = phy_ethtool_nway_reset,
	.get_drvinfo    = lan78xx_get_drvinfo,
	.get_msglevel   = lan78xx_get_msglevel,
	.set_msglevel   = lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom     = lan78xx_ethtool_get_eeprom,
	.set_eeprom     = lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings    = lan78xx_get_strings,
	.get_wol        = lan78xx_get_wol,
	.set_wol        = lan78xx_set_wol,
	.get_eee        = lan78xx_get_eee,
	.set_eee        = lan78xx_set_eee,
	.get_pauseparam = lan78xx_get_pause,
	.set_pauseparam = lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
};
1628
1629 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1630 {
1631         if (!netif_running(netdev))
1632                 return -EINVAL;
1633
1634         return phy_mii_ioctl(netdev->phydev, rq, cmd);
1635 }
1636
/* Determine and program the MAC address at init time.  Preference order:
 * 1) an address already valid in the RX_ADDRL/RX_ADDRH registers,
 * 2) platform data / Device Tree,
 * 3) EEPROM or OTP at EEPROM_MAC_OFFSET,
 * 4) a randomly generated address.
 * The result is mirrored into perfect-filter slot 0 (MAF 0) and into
 * net_device->dev_addr.
 * NOTE(review): register access return codes in 'ret' are not checked.
 */
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[6];

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	/* unpack the little-endian register pair into a byte array */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			random_ether_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		/* write the chosen address back to the MAC */
		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	/* mirror own address into perfect-filter slot 0 and mark it valid */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}
1686
1687 /* MDIO read and write wrappers for phylib */
1688 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1689 {
1690         struct lan78xx_net *dev = bus->priv;
1691         u32 val, addr;
1692         int ret;
1693
1694         ret = usb_autopm_get_interface(dev->intf);
1695         if (ret < 0)
1696                 return ret;
1697
1698         mutex_lock(&dev->phy_mutex);
1699
1700         /* confirm MII not busy */
1701         ret = lan78xx_phy_wait_not_busy(dev);
1702         if (ret < 0)
1703                 goto done;
1704
1705         /* set the address, index & direction (read from PHY) */
1706         addr = mii_access(phy_id, idx, MII_READ);
1707         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1708
1709         ret = lan78xx_phy_wait_not_busy(dev);
1710         if (ret < 0)
1711                 goto done;
1712
1713         ret = lan78xx_read_reg(dev, MII_DATA, &val);
1714
1715         ret = (int)(val & 0xFFFF);
1716
1717 done:
1718         mutex_unlock(&dev->phy_mutex);
1719         usb_autopm_put_interface(dev->intf);
1720
1721         return ret;
1722 }
1723
1724 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1725                                  u16 regval)
1726 {
1727         struct lan78xx_net *dev = bus->priv;
1728         u32 val, addr;
1729         int ret;
1730
1731         ret = usb_autopm_get_interface(dev->intf);
1732         if (ret < 0)
1733                 return ret;
1734
1735         mutex_lock(&dev->phy_mutex);
1736
1737         /* confirm MII not busy */
1738         ret = lan78xx_phy_wait_not_busy(dev);
1739         if (ret < 0)
1740                 goto done;
1741
1742         val = (u32)regval;
1743         ret = lan78xx_write_reg(dev, MII_DATA, val);
1744
1745         /* set the address, index & direction (write to PHY) */
1746         addr = mii_access(phy_id, idx, MII_WRITE);
1747         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1748
1749         ret = lan78xx_phy_wait_not_busy(dev);
1750         if (ret < 0)
1751                 goto done;
1752
1753 done:
1754         mutex_unlock(&dev->phy_mutex);
1755         usb_autopm_put_interface(dev->intf);
1756         return 0;
1757 }
1758
1759 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1760 {
1761         int ret;
1762
1763         dev->mdiobus = mdiobus_alloc();
1764         if (!dev->mdiobus) {
1765                 netdev_err(dev->net, "can't allocate MDIO bus\n");
1766                 return -ENOMEM;
1767         }
1768
1769         dev->mdiobus->priv = (void *)dev;
1770         dev->mdiobus->read = lan78xx_mdiobus_read;
1771         dev->mdiobus->write = lan78xx_mdiobus_write;
1772         dev->mdiobus->name = "lan78xx-mdiobus";
1773         dev->mdiobus->parent = &dev->udev->dev;
1774
1775         snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1776                  dev->udev->bus->busnum, dev->udev->devnum);
1777
1778         switch (dev->chipid) {
1779         case ID_REV_CHIP_ID_7800_:
1780         case ID_REV_CHIP_ID_7850_:
1781                 /* set to internal PHY id */
1782                 dev->mdiobus->phy_mask = ~(1 << 1);
1783                 break;
1784         case ID_REV_CHIP_ID_7801_:
1785                 /* scan thru PHYAD[2..0] */
1786                 dev->mdiobus->phy_mask = ~(0xFF);
1787                 break;
1788         }
1789
1790         ret = mdiobus_register(dev->mdiobus);
1791         if (ret) {
1792                 netdev_err(dev->net, "can't register MDIO bus\n");
1793                 goto exit1;
1794         }
1795
1796         netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1797         return 0;
1798 exit1:
1799         mdiobus_free(dev->mdiobus);
1800         return ret;
1801 }
1802
/* Tear down the MDIO bus created by lan78xx_mdio_init() */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1808
/* phylib link-change callback (passed to phy_connect_direct()).
 * Only applies a chip workaround for forced-100 mode; normal link
 * handling happens elsewhere.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		/* drop the speed bits to force 10, then re-assert 100 */
		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
		/* NOTE(review): 'ret' is never checked; there is no
		 * recovery possible inside this callback anyway.
		 */
	}
}
1840
1841 static int irq_map(struct irq_domain *d, unsigned int irq,
1842                    irq_hw_number_t hwirq)
1843 {
1844         struct irq_domain_data *data = d->host_data;
1845
1846         irq_set_chip_data(irq, data);
1847         irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1848         irq_set_noprobe(irq);
1849
1850         return 0;
1851 }
1852
/* irq_domain .unmap callback: undo irq_map() for a disposed virq */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
1858
/* irq_domain callbacks for the chip's interrupt sources */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
1863
1864 static void lan78xx_irq_mask(struct irq_data *irqd)
1865 {
1866         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1867
1868         data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1869 }
1870
1871 static void lan78xx_irq_unmask(struct irq_data *irqd)
1872 {
1873         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1874
1875         data->irqenable |= BIT(irqd_to_hwirq(irqd));
1876 }
1877
/* irq_chip .irq_bus_lock: taken around mask/unmask updates; released
 * (with the register write-back) in lan78xx_irq_bus_sync_unlock()
 */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
1884
/* irq_chip .irq_bus_sync_unlock: flush the cached enable word to
 * INT_EP_CTL (if it changed) and drop the bus lock taken in
 * lan78xx_irq_bus_lock().
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are the only two callbacks executed in non-atomic context.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
	/* NOTE(review): 'ret' is unchecked; if the read fails, 'buf' is
	 * compared uninitialized and a redundant write may occur.
	 */

	mutex_unlock(&data->irq_lock);
}
1902
/* irq_chip backed by the INT_EP_CTL register: mask/unmask only touch
 * the cached enable word; bus_sync_unlock pushes it to hardware.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
1910
1911 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1912 {
1913         struct device_node *of_node;
1914         struct irq_domain *irqdomain;
1915         unsigned int irqmap = 0;
1916         u32 buf;
1917         int ret = 0;
1918
1919         of_node = dev->udev->dev.parent->of_node;
1920
1921         mutex_init(&dev->domain_data.irq_lock);
1922
1923         lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1924         dev->domain_data.irqenable = buf;
1925
1926         dev->domain_data.irqchip = &lan78xx_irqchip;
1927         dev->domain_data.irq_handler = handle_simple_irq;
1928
1929         irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1930                                           &chip_domain_ops, &dev->domain_data);
1931         if (irqdomain) {
1932                 /* create mapping for PHY interrupt */
1933                 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1934                 if (!irqmap) {
1935                         irq_domain_remove(irqdomain);
1936
1937                         irqdomain = NULL;
1938                         ret = -EINVAL;
1939                 }
1940         } else {
1941                 ret = -EINVAL;
1942         }
1943
1944         dev->domain_data.irqdomain = irqdomain;
1945         dev->domain_data.phyirq = irqmap;
1946
1947         return ret;
1948 }
1949
1950 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1951 {
1952         if (dev->domain_data.phyirq > 0) {
1953                 irq_dispose_mapping(dev->domain_data.phyirq);
1954
1955                 if (dev->domain_data.irqdomain)
1956                         irq_domain_remove(dev->domain_data.irqdomain);
1957         }
1958         dev->domain_data.phyirq = 0;
1959         dev->domain_data.irqdomain = NULL;
1960 }
1961
/* PHY fixup for an external LAN8835: route the shared pin to IRQ_N
 * mode and enable the MAC-side RGMII TX clock delay.  Returning 1
 * tells phylib the fixup ran.
 */
static int lan8835_fixup(struct phy_device *phydev)
{
	int buf;
	int ret;
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
	buf &= ~0x1800;
	buf |= 0x0800;
	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);

	/* RGMII MAC TXC Delay Enable */
	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
				MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
	/* NOTE(review): 'ret' is never checked on these writes */

	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;

	return 1;
}
1985
/* PHY fixup for an external Micrel KSZ9031RNX: program RGMII pad
 * skews and record the resulting interface mode.  Returning 1 tells
 * phylib the fixup ran.
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
2002
2003 static int lan78xx_phy_init(struct lan78xx_net *dev)
2004 {
2005         int ret;
2006         u32 mii_adv;
2007         struct phy_device *phydev = dev->net->phydev;
2008
2009         phydev = phy_find_first(dev->mdiobus);
2010         if (!phydev) {
2011                 netdev_err(dev->net, "no PHY found\n");
2012                 return -EIO;
2013         }
2014
2015         if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
2016             (dev->chipid == ID_REV_CHIP_ID_7850_)) {
2017                 phydev->is_internal = true;
2018                 dev->interface = PHY_INTERFACE_MODE_GMII;
2019
2020         } else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2021                 if (!phydev->drv) {
2022                         netdev_err(dev->net, "no PHY driver found\n");
2023                         return -EIO;
2024                 }
2025
2026                 dev->interface = PHY_INTERFACE_MODE_RGMII;
2027
2028                 /* external PHY fixup for KSZ9031RNX */
2029                 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2030                                                  ksz9031rnx_fixup);
2031                 if (ret < 0) {
2032                         netdev_err(dev->net, "fail to register fixup\n");
2033                         return ret;
2034                 }
2035                 /* external PHY fixup for LAN8835 */
2036                 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2037                                                  lan8835_fixup);
2038                 if (ret < 0) {
2039                         netdev_err(dev->net, "fail to register fixup\n");
2040                         return ret;
2041                 }
2042                 /* add more external PHY fixup here if needed */
2043
2044                 phydev->is_internal = false;
2045         } else {
2046                 netdev_err(dev->net, "unknown ID found\n");
2047                 ret = -EIO;
2048                 goto error;
2049         }
2050
2051         /* if phyirq is not set, use polling mode in phylib */
2052         if (dev->domain_data.phyirq > 0)
2053                 phydev->irq = dev->domain_data.phyirq;
2054         else
2055                 phydev->irq = PHY_POLL;
2056         netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2057
2058         /* set to AUTOMDIX */
2059         phydev->mdix = ETH_TP_MDI_AUTO;
2060
2061         ret = phy_connect_direct(dev->net, phydev,
2062                                  lan78xx_link_status_change,
2063                                  dev->interface);
2064         if (ret) {
2065                 netdev_err(dev->net, "can't attach PHY to %s\n",
2066                            dev->mdiobus->id);
2067                 return -EIO;
2068         }
2069
2070         /* MAC doesn't support 1000T Half */
2071         phydev->supported &= ~SUPPORTED_1000baseT_Half;
2072
2073         /* support both flow controls */
2074         dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2075         phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2076         mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2077         phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2078
2079         genphy_config_aneg(phydev);
2080
2081         dev->fc_autoneg = phydev->autoneg;
2082
2083         return 0;
2084
2085 error:
2086         phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2087         phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
2088
2089         return ret;
2090 }
2091
2092 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2093 {
2094         int ret = 0;
2095         u32 buf;
2096         bool rxenabled;
2097
2098         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2099
2100         rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2101
2102         if (rxenabled) {
2103                 buf &= ~MAC_RX_RXEN_;
2104                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2105         }
2106
2107         /* add 4 to size for FCS */
2108         buf &= ~MAC_RX_MAX_SIZE_MASK_;
2109         buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2110
2111         ret = lan78xx_write_reg(dev, MAC_RX, buf);
2112
2113         if (rxenabled) {
2114                 buf |= MAC_RX_RXEN_;
2115                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2116         }
2117
2118         return 0;
2119 }
2120
/* Asynchronously unlink every not-yet-unlinked URB on queue 'q'.
 * Returns the number of URBs whose unlink was issued successfully.
 * The queue lock is dropped around usb_unlink_urb() because the
 * completion handlers take the same lock.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* restart the scan each iteration: the queue may have
		 * changed while the lock was dropped below
		 */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2165
/* net_device_ops.ndo_change_mtu: update the MTU and the MAC's max RX
 * frame length, and grow the RX URB size when the new hard MTU no
 * longer fits in the current one.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			/* in-flight RX URBs are now too small; unlink
			 * them and let the bh tasklet resubmit at the
			 * new size
			 */
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
2195
2196 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2197 {
2198         struct lan78xx_net *dev = netdev_priv(netdev);
2199         struct sockaddr *addr = p;
2200         u32 addr_lo, addr_hi;
2201         int ret;
2202
2203         if (netif_running(netdev))
2204                 return -EBUSY;
2205
2206         if (!is_valid_ether_addr(addr->sa_data))
2207                 return -EADDRNOTAVAIL;
2208
2209         ether_addr_copy(netdev->dev_addr, addr->sa_data);
2210
2211         addr_lo = netdev->dev_addr[0] |
2212                   netdev->dev_addr[1] << 8 |
2213                   netdev->dev_addr[2] << 16 |
2214                   netdev->dev_addr[3] << 24;
2215         addr_hi = netdev->dev_addr[4] |
2216                   netdev->dev_addr[5] << 8;
2217
2218         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2219         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2220
2221         /* Added to support MAC address changes */
2222         ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2223         ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2224
2225         return 0;
2226 }
2227
2228 /* Enable or disable Rx checksum offload engine */
2229 static int lan78xx_set_features(struct net_device *netdev,
2230                                 netdev_features_t features)
2231 {
2232         struct lan78xx_net *dev = netdev_priv(netdev);
2233         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2234         unsigned long flags;
2235         int ret;
2236
2237         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2238
2239         if (features & NETIF_F_RXCSUM) {
2240                 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2241                 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2242         } else {
2243                 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2244                 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2245         }
2246
2247         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2248                 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2249         else
2250                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2251
2252         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2253
2254         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2255
2256         return 0;
2257 }
2258
/* Worker for pdata->set_vlan: push the in-memory VLAN filter table to
 * the chip's dataport (register access must not run in atomic context).
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
2268
2269 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2270                                    __be16 proto, u16 vid)
2271 {
2272         struct lan78xx_net *dev = netdev_priv(netdev);
2273         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2274         u16 vid_bit_index;
2275         u16 vid_dword_index;
2276
2277         vid_dword_index = (vid >> 5) & 0x7F;
2278         vid_bit_index = vid & 0x1F;
2279
2280         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2281
2282         /* defer register writes to a sleepable context */
2283         schedule_work(&pdata->set_vlan);
2284
2285         return 0;
2286 }
2287
2288 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2289                                     __be16 proto, u16 vid)
2290 {
2291         struct lan78xx_net *dev = netdev_priv(netdev);
2292         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2293         u16 vid_bit_index;
2294         u16 vid_dword_index;
2295
2296         vid_dword_index = (vid >> 5) & 0x7F;
2297         vid_bit_index = vid & 0x1F;
2298
2299         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2300
2301         /* defer register writes to a sleepable context */
2302         schedule_work(&pdata->set_vlan);
2303
2304         return 0;
2305 }
2306
/* Program the six LTM (Latency Tolerance Messaging) registers.  If LTM
 * is enabled and EEPROM/OTP offset 0x3F points at a valid 24-byte
 * block, those values are used; otherwise all six registers are zeroed.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			/* temp[0] = length marker, temp[1] = word offset */
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
2345
/* Full chip initialization: lite-reset the device, reprogram the MAC
 * address, size USB bursts/queues by link speed, configure FIFOs and
 * the receive filter, reset the PHY and finally enable TX/RX paths.
 * Returns 0 on success or -EIO on reset/PHY-reset timeout.
 * NOTE(review): most intermediate 'ret' values are unchecked - a
 * pre-existing pattern throughout this driver.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;
	u8 sig;

	/* trigger a Lite Reset and poll (up to ~1s) for it to self-clear */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	/* re-program the MAC address registers after the reset */
	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* size the burst cap and RX/TX queue depths by USB link speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* set HW_CFG MEF bit */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	/* set USB_CFG0 BCE bit */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear all pending interrupts, disable flow control for now */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* poll (up to ~1s) until PHY reset clears and READY is set */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable MAC and FIFO transmit paths */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable MAC and FIFO receive paths */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2483
2484 static void lan78xx_init_stats(struct lan78xx_net *dev)
2485 {
2486         u32 *p;
2487         int i;
2488
2489         /* initialize for stats update
2490          * some counters are 20bits and some are 32bits
2491          */
2492         p = (u32 *)&dev->stats.rollover_max;
2493         for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2494                 p[i] = 0xFFFFF;
2495
2496         dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2497         dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2498         dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2499         dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2500         dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2501         dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2502         dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2503         dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2504         dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2505         dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2506
2507         lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
2508 }
2509
/* net_device_ops.ndo_open: bring the interface up - start the PHY,
 * submit the interrupt URB used for link events, arm statistics
 * collection and defer the link bring-up to the kevent worker.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			/* NOTE(review): this error path leaves the PHY
			 * started and the queue stopped
			 */
			goto done;
		}
	}

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2548
2549 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2550 {
2551         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2552         DECLARE_WAITQUEUE(wait, current);
2553         int temp;
2554
2555         /* ensure there are no more active urbs */
2556         add_wait_queue(&unlink_wakeup, &wait);
2557         set_current_state(TASK_UNINTERRUPTIBLE);
2558         dev->wait = &unlink_wakeup;
2559         temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2560
2561         /* maybe wait for deletions to finish. */
2562         while (!skb_queue_empty(&dev->rxq) &&
2563                !skb_queue_empty(&dev->txq) &&
2564                !skb_queue_empty(&dev->done)) {
2565                 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2566                 set_current_state(TASK_UNINTERRUPTIBLE);
2567                 netif_dbg(dev, ifdown, dev->net,
2568                           "waited for %d urb completions\n", temp);
2569         }
2570         set_current_state(TASK_RUNNING);
2571         dev->wait = NULL;
2572         remove_wait_queue(&unlink_wakeup, &wait);
2573 }
2574
/* ndo_stop: take the interface down.
 *
 * Teardown order matters: stop the stat timer and PHY, mark the device
 * closed (so deferred work becomes a no-op), drain the data URBs, kill
 * the interrupt URB, then stop the remaining deferred work.  Drops the
 * autopm reference taken in lan78xx_open().  Always returns 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net		*dev = netdev_priv(net);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	if (net->phydev)
		phy_stop(net->phydev);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* unlink and wait out the in-flight bulk URBs */
	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	/* frames parked while EVENT_RX_PAUSED was set are discarded */
	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2611
2612 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2613                                        struct sk_buff *skb, gfp_t flags)
2614 {
2615         u32 tx_cmd_a, tx_cmd_b;
2616
2617         if (skb_cow_head(skb, TX_OVERHEAD)) {
2618                 dev_kfree_skb_any(skb);
2619                 return NULL;
2620         }
2621
2622         if (skb_linearize(skb)) {
2623                 dev_kfree_skb_any(skb);
2624                 return NULL;
2625         }
2626
2627         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2628
2629         if (skb->ip_summed == CHECKSUM_PARTIAL)
2630                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2631
2632         tx_cmd_b = 0;
2633         if (skb_is_gso(skb)) {
2634                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2635
2636                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2637
2638                 tx_cmd_a |= TX_CMD_A_LSO_;
2639         }
2640
2641         if (skb_vlan_tag_present(skb)) {
2642                 tx_cmd_a |= TX_CMD_A_IVTG_;
2643                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2644         }
2645
2646         skb_push(skb, 4);
2647         cpu_to_le32s(&tx_cmd_b);
2648         memcpy(skb->data, &tx_cmd_b, 4);
2649
2650         skb_push(skb, 4);
2651         cpu_to_le32s(&tx_cmd_a);
2652         memcpy(skb->data, &tx_cmd_a, 4);
2653
2654         return skb;
2655 }
2656
/* Move @skb from @list to dev->done and record its new state.
 *
 * Called from URB completion (interrupt) context.  Note the lock
 * hand-off: interrupts are disabled once via the first irqsave, the
 * plain unlock/lock swaps from list->lock to dev->done.lock, and the
 * final irqrestore reuses the flags saved at the start — interrupts
 * stay off across both critical sections.  The bottom half is only
 * scheduled when the done queue transitions from empty to one entry.
 *
 * Returns the skb's previous state.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2679
/* Bulk-out URB completion handler (interrupt context).
 *
 * On success, credits the tx packet/byte counters with the totals
 * recorded in the skb's skb_data at submit time.  On error, counts a
 * tx error and reacts per status: -EPIPE defers an endpoint-halt
 * clear, reset/shutdown codes are silent, and protocol-level errors
 * stop the queue.  Always releases the async autopm reference taken
 * at submit and hands the skb to the bottom half as tx_done.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled: clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2718
2719 static void lan78xx_queue_skb(struct sk_buff_head *list,
2720                               struct sk_buff *newsk, enum skb_state state)
2721 {
2722         struct skb_data *entry = (struct skb_data *)newsk->cb;
2723
2724         __skb_queue_tail(list, newsk);
2725         entry->state = state;
2726 }
2727
2728 static netdev_tx_t
2729 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2730 {
2731         struct lan78xx_net *dev = netdev_priv(net);
2732         struct sk_buff *skb2 = NULL;
2733
2734         if (skb) {
2735                 skb_tx_timestamp(skb);
2736                 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2737         }
2738
2739         if (skb2) {
2740                 skb_queue_tail(&dev->txq_pend, skb2);
2741
2742                 /* throttle TX patch at slower than SUPER SPEED USB */
2743                 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2744                     (skb_queue_len(&dev->txq_pend) > 10))
2745                         netif_stop_queue(net);
2746         } else {
2747                 netif_dbg(dev, tx_err, dev->net,
2748                           "lan78xx_tx_prep return NULL\n");
2749                 dev->net->stats.tx_errors++;
2750                 dev->net->stats.tx_dropped++;
2751         }
2752
2753         tasklet_schedule(&dev->bh);
2754
2755         return NETDEV_TX_OK;
2756 }
2757
2758 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2759 {
2760         struct lan78xx_priv *pdata = NULL;
2761         int ret;
2762         int i;
2763
2764         dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2765
2766         pdata = (struct lan78xx_priv *)(dev->data[0]);
2767         if (!pdata) {
2768                 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2769                 return -ENOMEM;
2770         }
2771
2772         pdata->dev = dev;
2773
2774         spin_lock_init(&pdata->rfe_ctl_lock);
2775         mutex_init(&pdata->dataport_mutex);
2776
2777         INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2778
2779         for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2780                 pdata->vlan_table[i] = 0;
2781
2782         INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2783
2784         dev->net->features = 0;
2785
2786         if (DEFAULT_TX_CSUM_ENABLE)
2787                 dev->net->features |= NETIF_F_HW_CSUM;
2788
2789         if (DEFAULT_RX_CSUM_ENABLE)
2790                 dev->net->features |= NETIF_F_RXCSUM;
2791
2792         if (DEFAULT_TSO_CSUM_ENABLE)
2793                 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2794
2795         dev->net->hw_features = dev->net->features;
2796
2797         ret = lan78xx_setup_irq_domain(dev);
2798         if (ret < 0) {
2799                 netdev_warn(dev->net,
2800                             "lan78xx_setup_irq_domain() failed : %d", ret);
2801                 goto out1;
2802         }
2803
2804         dev->net->hard_header_len += TX_OVERHEAD;
2805         dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2806
2807         /* Init all registers */
2808         ret = lan78xx_reset(dev);
2809         if (ret) {
2810                 netdev_warn(dev->net, "Registers INIT FAILED....");
2811                 goto out2;
2812         }
2813
2814         ret = lan78xx_mdio_init(dev);
2815         if (ret) {
2816                 netdev_warn(dev->net, "MDIO INIT FAILED.....");
2817                 goto out2;
2818         }
2819
2820         dev->net->flags |= IFF_MULTICAST;
2821
2822         pdata->wol = WAKE_MAGIC;
2823
2824         return ret;
2825
2826 out2:
2827         lan78xx_remove_irq_domain(dev);
2828
2829 out1:
2830         netdev_warn(dev->net, "Bind routine FAILED");
2831         cancel_work_sync(&pdata->set_multicast);
2832         cancel_work_sync(&pdata->set_vlan);
2833         kfree(pdata);
2834         return ret;
2835 }
2836
2837 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2838 {
2839         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2840
2841         lan78xx_remove_irq_domain(dev);
2842
2843         lan78xx_remove_mdio(dev);
2844
2845         if (pdata) {
2846                 cancel_work_sync(&pdata->set_multicast);
2847                 cancel_work_sync(&pdata->set_vlan);
2848                 netif_dbg(dev, ifdown, dev->net, "free pdata");
2849                 kfree(pdata);
2850                 pdata = NULL;
2851                 dev->data[0] = 0;
2852         }
2853 }
2854
2855 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2856                                     struct sk_buff *skb,
2857                                     u32 rx_cmd_a, u32 rx_cmd_b)
2858 {
2859         if (!(dev->net->features & NETIF_F_RXCSUM) ||
2860             unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2861                 skb->ip_summed = CHECKSUM_NONE;
2862         } else {
2863                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2864                 skb->ip_summed = CHECKSUM_COMPLETE;
2865         }
2866 }
2867
/* Hand one received frame to the network stack.
 *
 * While EVENT_RX_PAUSED is set the frame is parked on rxq_pause
 * instead.  Otherwise the rx counters are bumped, the protocol is
 * resolved, the skb's control block is scrubbed, and the frame is
 * delivered via netif_rx() (unless claimed by rx timestamping).
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int		status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	/* also pulls the ethernet header off the skb */
	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* scrub the skb_data so stale state can't leak to later stages */
	memset(skb->cb, 0, sizeof(struct skb_data));

	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
2894
/* Split one bulk-in buffer into individual ethernet frames.
 *
 * Each frame is preceded by three little-endian command words
 * (RX_CMD_A/B/C), and frames are padded so the next header starts on
 * a 4-byte boundary.  Intermediate frames are handed up as clones;
 * the final frame reuses @skb itself (trimmed, caller delivers it).
 *
 * Returns 1 on success, 0 on a short buffer or clone failure (the
 * caller counts an rx error).
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* pull the three command words off the front */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* hardware flagged a receive error: skip this frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			/* clone shares the data buffer; narrow the clone's
			 * view to cover just this frame
			 */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
2966
2967 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2968 {
2969         if (!lan78xx_rx(dev, skb)) {
2970                 dev->net->stats.rx_errors++;
2971                 goto done;
2972         }
2973
2974         if (skb->len) {
2975                 lan78xx_skb_return(dev, skb);
2976                 return;
2977         }
2978
2979         netif_dbg(dev, rx_err, dev->net, "drop\n");
2980         dev->net->stats.rx_errors++;
2981 done:
2982         skb_queue_tail(&dev->done, skb);
2983 }
2984
2985 static void rx_complete(struct urb *urb);
2986
/* Allocate an rx skb for @urb and submit it on the bulk-in pipe.
 *
 * Takes ownership of @urb: on any failure both the urb and the skb
 * are freed here.  Submission is skipped (-ENOLINK) when the device
 * is detached, not running, halted, or asleep.  rxq.lock is held
 * across the submit so the queued skb and the in-flight urb stay
 * consistent.  Returns 0 on success or a negative errno.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	/* record per-transfer bookkeeping in the skb's control block */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* let the bottom half retry the refill */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
3046
/* Bulk-in URB completion handler (interrupt context).
 *
 * Classifies the completion status, queues the skb to the bottom half
 * via defer_bh() (rx_done for good data, rx_cleanup otherwise), and
 * either resubmits the urb for the next receive or releases it.
 * Setting entry->urb and nulling the local pointer transfers urb
 * ownership to the bottom half for the cleanup cases.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* runt transfer: count it and clean up instead of parsing */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* bottom half will free the urb */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			/* recycle the urb for the next receive */
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
3116
/* TX bottom half: batch pending frames into one bulk-out URB.
 *
 * Non-GSO frames queued on txq_pend are coalesced (each 4-byte
 * aligned) into a single freshly allocated skb, up to
 * MAX_SINGLE_PACKET_SIZE.  A GSO frame is never mixed with others:
 * it is sent alone, short-circuiting straight to submission.  While
 * the device is autosuspended, the URB is parked on dev->deferred
 * and sent at resume time instead.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	count = 0;
	length = 0;
	/* first pass: decide how many queued frames fit in one URB */
	spin_lock_irqsave(&tqp->lock, flags);
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			/* GSO frame goes out alone, unlinked under the lock */
			count = 1;
			length = skb->len - TX_OVERHEAD;
			__skb_unlink(skb, tqp);
			spin_unlock_irqrestore(&tqp->lock, flags);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* keep each frame 4-byte aligned inside the batch */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}
	spin_unlock_irqrestore(&tqp->lock, flags);

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	/* second pass: dequeue and pack the counted frames */
	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto drop;

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		netif_trans_update(dev->net);
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
3244
3245 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3246 {
3247         struct urb *urb;
3248         int i;
3249
3250         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3251                 for (i = 0; i < 10; i++) {
3252                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3253                                 break;
3254                         urb = usb_alloc_urb(0, GFP_ATOMIC);
3255                         if (urb)
3256                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3257                                         return;
3258                 }
3259
3260                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3261                         tasklet_schedule(&dev->bh);
3262         }
3263         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3264                 netif_wake_queue(dev->net);
3265 }
3266
/* Tasklet bottom half: drain the done queue, then drive tx/rx work.
 *
 * Each skb on dev->done carries its state in the skb_data control
 * block: rx_done frames are parsed and delivered, tx_done and
 * rx_cleanup entries have their urb and skb freed.  Afterwards, if
 * the device is up, the stat timer delta is reset, pending tx is
 * batched, and the rx queue is refilled.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* unexpected state: stop processing this pass */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
3310
/* Deferred-work handler for events flagged by lan78xx_defer_kevent():
 * clearing halted tx/rx endpoints, re-evaluating link state, and
 * refreshing hardware statistics.
 *
 * NOTE(review): the fail_pipe/fail_halt/skip_reset labels jump INTO
 * conditional bodies — legal C, but it means an autopm failure
 * reuses the "can't clear ... halt" / "link reset failed" prints
 * with the autopm status.  Preserved as-is.
 */
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt(dev->udev, dev->pipe_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
fail_pipe:
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
				goto fail_halt;
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
fail_halt:
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			/* endpoint cleared: restart rx refill */
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		int ret = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (lan78xx_link_reset(dev) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			netdev_info(dev->net, "link reset failed (%d)\n",
				    ret);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}

	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
		lan78xx_update_stats(dev);

		clear_bit(EVENT_STAT_UPDATE, &dev->flags);

		/* back off the stat polling interval (capped at 50x) */
		mod_timer(&dev->stat_monitor,
			  jiffies + (STAT_UPDATE_TIMER * dev->delta));

		dev->delta = min((dev->delta * 2), 50);
	}
}
3388
/* Completion handler for the interrupt (status) endpoint URB.
 *
 * On success the status data is handed to lan78xx_status().  On a
 * software-driven shutdown (-ENOENT: urb killed, -ESHUTDOWN: hardware gone)
 * the URB is not resubmitted.  Any other status is only logged.  As long as
 * the netdev is still running, the URB is resubmitted; runs in interrupt
 * context, hence GFP_ATOMIC.
 */
static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:			/* urb killed */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_running(dev->net))
		return;

	/* clear stale contents before the device writes fresh status data */
	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status != 0)
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
}
3424
/* USB disconnect handler: tear the device down roughly in the reverse
 * order of probe.  intfdata is cleared first so concurrent callbacks see
 * the device as gone.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net		*dev;
	struct usb_device		*udev;
	struct net_device		*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);
	net = dev->net;

	/* undo the PHY fixups registered elsewhere in the driver
	 * (NOTE(review): presumably at module/bind time -- outside this view)
	 */
	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_disconnect(net->phydev);

	unregister_netdev(net);

	/* no further kevent work may run after this point */
	cancel_delayed_work_sync(&dev->wq);

	/* drop deferred TX URBs that were never submitted */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);	/* matches usb_get_dev() in probe */
}
3458
3459 static void lan78xx_tx_timeout(struct net_device *net)
3460 {
3461         struct lan78xx_net *dev = netdev_priv(net);
3462
3463         unlink_urbs(dev, &dev->txq);
3464         tasklet_schedule(&dev->bh);
3465 }
3466
3467 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3468                                                 struct net_device *netdev,
3469                                                 netdev_features_t features)
3470 {
3471         if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3472                 features &= ~NETIF_F_GSO_MASK;
3473
3474         features = vlan_features_check(skb, features);
3475         features = vxlan_features_check(skb, features);
3476
3477         return features;
3478 }
3479
/* net_device callback table wired up in lan78xx_probe() */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
3495
3496 static void lan78xx_stat_monitor(unsigned long param)
3497 {
3498         struct lan78xx_net *dev;
3499
3500         dev = (struct lan78xx_net *)param;
3501
3502         lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3503 }
3504
/* Probe: allocate and initialize the netdev, validate the three expected
 * endpoints (bulk-in, bulk-out, interrupt-in), bind the hardware, set up
 * the interrupt URB, init the PHY and register the network device.
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired so far are released via the goto-cleanup labels.
 */
static int lan78xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
	struct lan78xx_net *dev;
	struct net_device *netdev;
	struct usb_device *udev;
	int ret;
	unsigned maxp;
	unsigned period;
	u8 *buf = NULL;

	udev = interface_to_usbdev(intf);
	udev = usb_get_dev(udev);	/* ref dropped at out1 / disconnect */

	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		ret = -ENOMEM;
		goto out1;
	}

	/* netdev_printk() needs this */
	SET_NETDEV_DEV(netdev, &intf->dev);

	dev = netdev_priv(netdev);
	dev->udev = udev;
	dev->intf = intf;
	dev->net = netdev;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
					| NETIF_MSG_PROBE | NETIF_MSG_LINK);

	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->done);
	skb_queue_head_init(&dev->rxq_pause);
	skb_queue_head_init(&dev->txq_pend);
	mutex_init(&dev->phy_mutex);

	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
	init_usb_anchor(&dev->deferred);

	netdev->netdev_ops = &lan78xx_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	netdev->ethtool_ops = &lan78xx_ethtool_ops;

	dev->stat_monitor.function = lan78xx_stat_monitor;
	dev->stat_monitor.data = (unsigned long)dev;
	dev->delta = 1;		/* stats poll interval multiplier */
	init_timer(&dev->stat_monitor);

	mutex_init(&dev->stats.access_lock);

	/* device must expose at least bulk-in, bulk-out and interrupt-in */
	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	ep_intr = &intf->cur_altsetting->endpoint[2];
	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_intr = usb_rcvintpipe(dev->udev,
					usb_endpoint_num(&ep_intr->desc));

	ret = lan78xx_bind(dev, intf);
	if (ret < 0)
		goto out2;
	strcpy(netdev->name, "eth%d");

	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;

	/* MTU range: 68 - 9000 */
	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
	netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);

	period = ep_intr->desc.bInterval;
	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
	buf = kmalloc(maxp, GFP_KERNEL);
	/* NOTE(review): if this kmalloc fails, probe continues without an
	 * interrupt URB instead of failing -- confirm this best-effort
	 * behavior is intended.
	 */
	if (buf) {
		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
		if (!dev->urb_intr) {
			ret = -ENOMEM;
			kfree(buf);
			goto out3;
		} else {
			usb_fill_int_urb(dev->urb_intr, dev->udev,
					 dev->pipe_intr, buf, maxp,
					 intr_complete, dev, period);
			/* buf is released together with the URB */
			dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
		}
	}

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);

	/* Reject broken descriptors. */
	if (dev->maxpacket == 0) {
		ret = -ENODEV;
		goto out4;
	}

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto out4;

	ret = register_netdev(netdev);
	if (ret != 0) {
		netif_err(dev, probe, netdev, "couldn't register the device\n");
		goto out5;
	}

	usb_set_intfdata(intf, dev);

	ret = device_set_wakeup_enable(&udev->dev, true);

	 /* Default delay of 2sec has more overhead than advantage.
	  * Set to 10sec as default.
	  */
	pm_runtime_set_autosuspend_delay(&udev->dev,
					 DEFAULT_AUTOSUSPEND_DELAY);

	return 0;

out5:
	phy_disconnect(netdev->phydev);
out4:
	usb_free_urb(dev->urb_intr);
out3:
	lan78xx_unbind(dev, intf);
out2:
	free_netdev(netdev);
out1:
	usb_put_dev(udev);

	return ret;
}
3662
/* Compute the CRC-16 (polynomial 0x8005, init 0xFFFF, data consumed
 * LSB-first) over @len bytes of @buf, as expected by the WUF_CFG wake-up
 * frame filter registers.
 */
static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
{
	const u16 poly = 0x8005;
	u16 crc = 0xFFFF;
	int i, k;

	for (i = 0; i < len; i++) {
		u8 byte = buf[i];

		for (k = 0; k < 8; k++) {
			u16 feedback = (crc >> 15) ^ (byte & 1);

			crc <<= 1;
			if (feedback)
				crc = (crc ^ poly) | 0x0001;
			byte >>= 1;
		}
	}

	return crc;
}
3687
/* Arm the chip's wake-up machinery for suspend according to the WAKE_*
 * bits in @wol: program wake-up frame filters (WUF_CFG/WUF_MASK), the
 * wake-up control register (WUCSR) and the power-management mode
 * (PMT_CTL), then re-enable the receiver so wake frames can be detected.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* leading bytes of IPv4/IPv6 multicast MAC addresses and the ARP
	 * ethertype, used to build the CRC16 filter patterns below
	 */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* stop TX and RX while the wake-up state is reprogrammed */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear wake-up control and wake source status */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* start from a clean set of wake-up frame filters */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x7: presumably selects the 3 bytes fed to the
		 * CRC above -- confirm against datasheet
		 */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable the receiver so wake frames can be detected */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* NOTE(review): individual register access return codes are stored
	 * in ret but never checked; the function always returns 0 --
	 * confirm this is intended.
	 */
	return 0;
}
3830
/* USB suspend callback.  On the first suspend (suspend_count 0 -> 1)
 * TX/RX are stopped and all URBs are torn down; an autosuspend request is
 * refused with -EBUSY while transmit work is pending.  The chip is then
 * armed either for good-frame wake-up (runtime/auto suspend) or for the
 * ethtool-configured WoL settings (system suspend).
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	/* NOTE(review): event is assigned but never used -- confirm */
	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
			PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* no stats polling while asleep */
		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			/* presumably write-one-to-clear of the wake-up
			 * status bits -- confirm against datasheet
			 */
			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* re-enable RX so wake frames are seen */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			/* system suspend: honor the configured WoL modes */
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
3926
/* USB resume callback.  Restart the stats timer if it lapsed, resubmit
 * the interrupt URB and any TX URBs deferred while asleep, clear all
 * wake-up state left in the chip and re-enable the transmitter.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!timer_pending(&dev->stat_monitor)) {
		dev->delta = 1;
		mod_timer(&dev->stat_monitor,
			  jiffies + STAT_UPDATE_TIMER);
	}

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		/* NOTE(review): the submit result is silently discarded --
		 * confirm intended.
		 */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
				usb_submit_urb(dev->urb_intr, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		/* resubmit TX URBs that were anchored while suspended */
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				/* drop the PM reference taken at xmit time */
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* clear wake-up sources and any latched wake status */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
3993
/* Resume after the device may have been reset (chip state lost): re-run
 * the full hardware reset, restart the PHY, then take the normal resume
 * path.
 */
static int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);

	phy_start(dev->net->phydev);

	return lan78xx_resume(intf);
}
4004
/* USB vendor/product IDs this driver binds to */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{
	/* ATM2-AF USB Gigabit Ethernet Device */
	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
	},
	{},	/* terminator */
};
MODULE_DEVICE_TABLE(usb, products);
4025
/* USB driver glue.  Autosuspend is supported (probe sets
 * needs_remote_wakeup) and hub-initiated LPM is disabled.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");