2 * Copyright (C) 2015 Microchip Technology
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <net/ip6_checksum.h>
33 #include <net/vxlan.h>
34 #include <linux/microchipphy.h>
35 #include <linux/of_net.h>
/* Driver identification strings, exported through the MODULE_* macros. */
38 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
39 #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
40 #define DRIVER_NAME "lan78xx"
41 #define DRIVER_VERSION "1.0.4"
/* Timing constants: TX watchdog timeout, RX throttle interval, and how
 * long (ms) to wait for an unlinked URB to complete.
 */
43 #define TX_TIMEOUT_JIFFIES (5 * HZ)
44 #define THROTTLE_JIFFIES (HZ / 8)
45 #define UNLINK_TIMEOUT_MS 3
/* Upper bound on memory held by queued RX skbs (60 max-size frames). */
47 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
/* Bulk endpoint max packet sizes per USB link speed (Super/High/Full). */
49 #define SS_USB_PKT_SIZE (1024)
50 #define HS_USB_PKT_SIZE (512)
51 #define FS_USB_PKT_SIZE (64)
/* Device FIFO sizing and USB burst/bulk-in tuning defaults. */
53 #define MAX_RX_FIFO_SIZE (12 * 1024)
54 #define MAX_TX_FIFO_SIZE (12 * 1024)
55 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
56 #define DEFAULT_BULK_IN_DELAY (0x0800)
/* Largest single frame supported (jumbo, 9000 bytes). */
57 #define MAX_SINGLE_PACKET_SIZE (9000)
/* Feature defaults applied at probe time. */
58 #define DEFAULT_TX_CSUM_ENABLE (true)
59 #define DEFAULT_RX_CSUM_ENABLE (true)
60 #define DEFAULT_TSO_CSUM_ENABLE (true)
61 #define DEFAULT_VLAN_FILTER_ENABLE (true)
/* Bytes of per-packet TX command header prepended to each frame. */
62 #define TX_OVERHEAD (8)
/* USB vendor/product IDs and magic numbers for EEPROM/OTP images. */
65 #define LAN78XX_USB_VENDOR_ID (0x0424)
66 #define LAN7800_USB_PRODUCT_ID (0x7800)
67 #define LAN7850_USB_PRODUCT_ID (0x7850)
68 #define LAN78XX_EEPROM_MAGIC (0x78A5)
69 #define LAN78XX_OTP_MAGIC (0x78F3)
/* First-byte indicator values distinguishing EEPROM vs. OTP images,
 * plus the MAC address offset within a valid EEPROM.
 */
74 #define EEPROM_INDICATOR (0xA5)
75 #define EEPROM_MAC_OFFSET (0x01)
76 #define MAX_EEPROM_SIZE 512
77 #define OTP_INDICATOR_1 (0xF3)
78 #define OTP_INDICATOR_2 (0xF7)
/* All wake-on-LAN trigger types this hardware can support. */
80 #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
81 WAKE_MCAST | WAKE_BCAST | \
82 WAKE_ARP | WAKE_MAGIC)
84 /* USB related defines */
85 #define BULK_IN_PIPE 1
86 #define BULK_OUT_PIPE 2
88 /* default autosuspend delay (mSec)*/
89 #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
91 /* statistic update interval (mSec) */
92 #define STAT_UPDATE_TIMER (1 * 1000)
/* ethtool -S statistic names. Order must match the member order of
 * struct lan78xx_statstage / lan78xx_statstage64 below, since
 * lan78xx_get_stats() memcpy()s the stats struct straight into the
 * u64 data array.
 */
94 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
96 "RX Alignment Errors",
99 "RX Undersize Frame Errors",
100 "RX Oversize Frame Errors",
102 "RX Unicast Byte Count",
103 "RX Broadcast Byte Count",
104 "RX Multicast Byte Count",
106 "RX Broadcast Frames",
107 "RX Multicast Frames",
110 "RX 65 - 127 Byte Frames",
111 "RX 128 - 255 Byte Frames",
112 "RX 256 - 511 Bytes Frames",
113 "RX 512 - 1023 Byte Frames",
114 "RX 1024 - 1518 Byte Frames",
115 "RX Greater 1518 Byte Frames",
116 "EEE RX LPI Transitions",
119 "TX Excess Deferral Errors",
122 "TX Single Collisions",
123 "TX Multiple Collisions",
124 "TX Excessive Collision",
125 "TX Late Collisions",
126 "TX Unicast Byte Count",
127 "TX Broadcast Byte Count",
128 "TX Multicast Byte Count",
130 "TX Broadcast Frames",
131 "TX Multicast Frames",
134 "TX 65 - 127 Byte Frames",
135 "TX 128 - 255 Byte Frames",
136 "TX 256 - 511 Bytes Frames",
137 "TX 512 - 1023 Byte Frames",
138 "TX 1024 - 1518 Byte Frames",
139 "TX Greater 1518 Byte Frames",
140 "EEE TX LPI Transitions",
/* Raw hardware statistics snapshot, as returned by the device's
 * GET_STATS vendor request (32-bit counters, little-endian on the
 * wire). Member order mirrors the on-device statistics layout and the
 * lan78xx_gstrings name table.
 */
144 struct lan78xx_statstage {
146 u32 rx_alignment_errors;
147 u32 rx_fragment_errors;
148 u32 rx_jabber_errors;
149 u32 rx_undersize_frame_errors;
150 u32 rx_oversize_frame_errors;
151 u32 rx_dropped_frames;
152 u32 rx_unicast_byte_count;
153 u32 rx_broadcast_byte_count;
154 u32 rx_multicast_byte_count;
155 u32 rx_unicast_frames;
156 u32 rx_broadcast_frames;
157 u32 rx_multicast_frames;
159 u32 rx_64_byte_frames;
160 u32 rx_65_127_byte_frames;
161 u32 rx_128_255_byte_frames;
162 u32 rx_256_511_bytes_frames;
163 u32 rx_512_1023_byte_frames;
164 u32 rx_1024_1518_byte_frames;
165 u32 rx_greater_1518_byte_frames;
166 u32 eee_rx_lpi_transitions;
169 u32 tx_excess_deferral_errors;
170 u32 tx_carrier_errors;
171 u32 tx_bad_byte_count;
172 u32 tx_single_collisions;
173 u32 tx_multiple_collisions;
174 u32 tx_excessive_collision;
175 u32 tx_late_collisions;
176 u32 tx_unicast_byte_count;
177 u32 tx_broadcast_byte_count;
178 u32 tx_multicast_byte_count;
179 u32 tx_unicast_frames;
180 u32 tx_broadcast_frames;
181 u32 tx_multicast_frames;
183 u32 tx_64_byte_frames;
184 u32 tx_65_127_byte_frames;
185 u32 tx_128_255_byte_frames;
186 u32 tx_256_511_bytes_frames;
187 u32 tx_512_1023_byte_frames;
188 u32 tx_1024_1518_byte_frames;
189 u32 tx_greater_1518_byte_frames;
190 u32 eee_tx_lpi_transitions;
/* 64-bit accumulated statistics. Same member order as
 * struct lan78xx_statstage; lan78xx_update_stats() widens the 32-bit
 * hardware counters into these, compensating for rollover.
 */
194 struct lan78xx_statstage64 {
196 u64 rx_alignment_errors;
197 u64 rx_fragment_errors;
198 u64 rx_jabber_errors;
199 u64 rx_undersize_frame_errors;
200 u64 rx_oversize_frame_errors;
201 u64 rx_dropped_frames;
202 u64 rx_unicast_byte_count;
203 u64 rx_broadcast_byte_count;
204 u64 rx_multicast_byte_count;
205 u64 rx_unicast_frames;
206 u64 rx_broadcast_frames;
207 u64 rx_multicast_frames;
209 u64 rx_64_byte_frames;
210 u64 rx_65_127_byte_frames;
211 u64 rx_128_255_byte_frames;
212 u64 rx_256_511_bytes_frames;
213 u64 rx_512_1023_byte_frames;
214 u64 rx_1024_1518_byte_frames;
215 u64 rx_greater_1518_byte_frames;
216 u64 eee_rx_lpi_transitions;
219 u64 tx_excess_deferral_errors;
220 u64 tx_carrier_errors;
221 u64 tx_bad_byte_count;
222 u64 tx_single_collisions;
223 u64 tx_multiple_collisions;
224 u64 tx_excessive_collision;
225 u64 tx_late_collisions;
226 u64 tx_unicast_byte_count;
227 u64 tx_broadcast_byte_count;
228 u64 tx_multicast_byte_count;
229 u64 tx_unicast_frames;
230 u64 tx_broadcast_frames;
231 u64 tx_multicast_frames;
233 u64 tx_64_byte_frames;
234 u64 tx_65_127_byte_frames;
235 u64 tx_128_255_byte_frames;
236 u64 tx_256_511_bytes_frames;
237 u64 tx_512_1023_byte_frames;
238 u64 tx_1024_1518_byte_frames;
239 u64 tx_greater_1518_byte_frames;
240 u64 eee_tx_lpi_transitions;
/* Per-device private state hung off lan78xx_net (dev->data[0]):
 * shadow copies of the RX filter tables plus the locks and deferred
 * work items used to program them outside atomic context.
 */
246 struct lan78xx_priv {
247 struct lan78xx_net *dev;
/* Shadow of the VLAN/multicast hash and perfect-filter (MAF) tables;
 * written to hardware by the deferred work functions below.
 */
249 u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
250 u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
251 u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
252 struct mutex dataport_mutex; /* for dataport access */
253 spinlock_t rfe_ctl_lock; /* for rfe register access */
/* Deferred writers: ndo_set_rx_mode runs atomically, so the actual
 * USB register writes happen from these work items.
 */
254 struct work_struct set_multicast;
255 struct work_struct set_vlan;
/* Per-skb bookkeeping overlaid on skb->cb for URBs in flight. */
269 struct skb_data { /* skb->cb is one of these */
271 struct lan78xx_net *dev;
272 enum skb_state state;
/* Context for an asynchronous USB control request (presumably part of
 * a usb_context-style struct whose declaration is not fully visible
 * here -- TODO confirm against the complete file).
 */
278 struct usb_ctrlrequest req;
279 struct lan78xx_net *dev;
/* Bit numbers for dev->flags, set via lan78xx_defer_kevent() and
 * consumed by the deferred-work handler (EVENT_* = deferred events
 * and device state markers).
 */
282 #define EVENT_TX_HALT 0
283 #define EVENT_RX_HALT 1
284 #define EVENT_RX_MEMORY 2
285 #define EVENT_STS_SPLIT 3
286 #define EVENT_LINK_RESET 4
287 #define EVENT_RX_PAUSED 5
288 #define EVENT_DEV_WAKING 6
289 #define EVENT_DEV_ASLEEP 7
290 #define EVENT_DEV_OPEN 8
291 #define EVENT_STAT_UPDATE 9
/* Statistics bookkeeping (fields of the stats container struct):
 * saved       - last raw hardware snapshot, for rollover detection
 * rollover_count - how many times each 32-bit counter has wrapped
 * rollover_max   - per-counter maximum value before wrap
 * curr_stat      - accumulated 64-bit totals exposed to ethtool
 */
294 struct mutex access_lock; /* for stats access */
295 struct lan78xx_statstage saved;
296 struct lan78xx_statstage rollover_count;
297 struct lan78xx_statstage rollover_max;
298 struct lan78xx_statstage64 curr_stat;
/* Main per-device state (fields of struct lan78xx_net). */
302 struct net_device *net;
303 struct usb_device *udev;
304 struct usb_interface *intf;
/* skb queues for the RX/TX pipeline: submitted RX, in-flight TX,
 * completed URBs awaiting bottom-half processing, RX held while
 * paused, and TX pending submission.
 */
309 struct sk_buff_head rxq;
310 struct sk_buff_head txq;
311 struct sk_buff_head done;
312 struct sk_buff_head rxq_pause;
313 struct sk_buff_head txq_pend;
/* Bottom-half tasklet and the deferred-event workqueue item. */
315 struct tasklet_struct bh;
316 struct delayed_work wq;
318 struct urb *urb_intr;
321 struct usb_anchor deferred;
323 struct mutex phy_mutex; /* for phy access */
324 unsigned pipe_in, pipe_out, pipe_intr;
326 u32 hard_mtu; /* count any extra framing */
327 size_t rx_urb_size; /* size for rx urbs */
331 wait_queue_head_t *wait;
332 unsigned char suspend_count;
/* delay: generic retry/throttle timer; stat_monitor: periodic
 * statistics refresh (STAT_UPDATE_TIMER interval).
 */
335 struct timer_list delay;
336 struct timer_list stat_monitor;
/* data[0] holds the struct lan78xx_priv pointer (see pdata casts). */
338 unsigned long data[5];
345 struct mii_bus *mdiobus;
348 u8 fc_request_control;
351 struct statstage stats;
354 /* use ethtool to change the level for any given device */
/* -1 lets netif_msg_init() pick the default message-level mask. */
355 static int msg_level = -1;
356 module_param(msg_level, int, 0);
357 MODULE_PARM_DESC(msg_level, "Override default message level");
/* Read a 32-bit device register over a USB vendor control transfer.
 * @index: register offset; @data: out-parameter for the value.
 * Returns the usb_control_msg() result (>= 0 on success, negative
 * errno on failure). The buffer is heap-allocated because USB control
 * transfers must not target stack memory.
 */
359 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
361 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
367 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
368 USB_VENDOR_REQUEST_READ_REGISTER,
369 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
370 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
371 if (likely(ret >= 0)) {
375 netdev_warn(dev->net,
376 "Failed to read register index 0x%08x. ret = %d",
/* Write a 32-bit device register over a USB vendor control transfer.
 * Mirror of lan78xx_read_reg(): heap buffer for the control transfer,
 * returns the usb_control_msg() result and warns on failure.
 */
385 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
387 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
396 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
397 USB_VENDOR_REQUEST_WRITE_REGISTER,
398 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
399 0, index, buf, 4, USB_CTRL_SET_TIMEOUT),
400 if (unlikely(ret < 0)) {
401 netdev_warn(dev->net,
402 "Failed to write register index 0x%08x. ret = %d",
/* Fetch the full hardware statistics block via the GET_STATS vendor
 * request into @data, byte-swapping each little-endian u32 in place.
 * Returns the usb_control_msg() result; warns on failure.
 */
411 static int lan78xx_read_stats(struct lan78xx_net *dev,
412 struct lan78xx_statstage *data)
416 struct lan78xx_statstage *stats;
420 stats = kmalloc(sizeof(*stats), GFP_KERNEL);
424 ret = usb_control_msg(dev->udev,
425 usb_rcvctrlpipe(dev->udev, 0),
426 USB_VENDOR_REQUEST_GET_STATS,
427 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
432 USB_CTRL_SET_TIMEOUT);
433 if (likely(ret >= 0)) {
/* Device returns little-endian; convert word-by-word for the host. */
436 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
437 le32_to_cpus(&src[i]);
441 netdev_warn(dev->net,
442 "Failed to read stat ret = %d", ret);
/* If the fresh hardware counter is below the previously saved value,
 * the 32-bit counter wrapped: bump its per-member rollover count.
 * NOTE: function-like macro -- evaluates its arguments more than once,
 * so callers must pass side-effect-free expressions.
 */
450 #define check_counter_rollover(struct1, dev_stats, member) { \
451 if (struct1->member < dev_stats.saved.member) \
452 dev_stats.rollover_count.member++; \
/* Compare a fresh hardware snapshot against the last saved one,
 * incrementing rollover counts for every counter that wrapped, then
 * save @stats as the new baseline. Called with a snapshot just read
 * by lan78xx_read_stats(); caller holds the stats access lock.
 */
455 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
456 struct lan78xx_statstage *stats)
458 check_counter_rollover(stats, dev->stats, rx_fcs_errors);
459 check_counter_rollover(stats, dev->stats, rx_alignment_errors);
460 check_counter_rollover(stats, dev->stats, rx_fragment_errors);
461 check_counter_rollover(stats, dev->stats, rx_jabber_errors);
462 check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
463 check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
464 check_counter_rollover(stats, dev->stats, rx_dropped_frames);
465 check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
466 check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
467 check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
468 check_counter_rollover(stats, dev->stats, rx_unicast_frames);
469 check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
470 check_counter_rollover(stats, dev->stats, rx_multicast_frames);
471 check_counter_rollover(stats, dev->stats, rx_pause_frames);
472 check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
473 check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
474 check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
475 check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
476 check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
477 check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
478 check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
479 check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
480 check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
481 check_counter_rollover(stats, dev->stats, tx_fcs_errors);
482 check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
483 check_counter_rollover(stats, dev->stats, tx_carrier_errors);
484 check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
485 check_counter_rollover(stats, dev->stats, tx_single_collisions);
486 check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
487 check_counter_rollover(stats, dev->stats, tx_excessive_collision);
488 check_counter_rollover(stats, dev->stats, tx_late_collisions);
489 check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
490 check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
491 check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
492 check_counter_rollover(stats, dev->stats, tx_unicast_frames);
493 check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
494 check_counter_rollover(stats, dev->stats, tx_multicast_frames);
495 check_counter_rollover(stats, dev->stats, tx_pause_frames);
496 check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
497 check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
498 check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
499 check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
500 check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
501 check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
502 check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
503 check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
504 check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
/* New baseline for the next rollover comparison. */
506 memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
/* Refresh the accumulated 64-bit statistics: read a fresh hardware
 * snapshot, detect rollovers, then compute for every counter
 *   curr = raw + rollovers * (max + 1)
 * by walking the three structs in parallel as flat u32/u64 arrays
 * (valid because all stats structs share the same member order).
 * Takes an autopm reference so the device is awake for the USB I/O.
 */
509 static void lan78xx_update_stats(struct lan78xx_net *dev)
511 u32 *p, *count, *max;
514 struct lan78xx_statstage lan78xx_stats;
516 if (usb_autopm_get_interface(dev->intf) < 0)
519 p = (u32 *)&lan78xx_stats;
520 count = (u32 *)&dev->stats.rollover_count;
521 max = (u32 *)&dev->stats.rollover_max;
522 data = (u64 *)&dev->stats.curr_stat;
524 mutex_lock(&dev->stats.access_lock);
526 if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
527 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
529 for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
530 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
532 mutex_unlock(&dev->stats.access_lock);
534 usb_autopm_put_interface(dev->intf);
537 /* Loop until the read is completed with timeout called with phy_mutex held */
/* Polls MII_ACC until the MII_BUSY bit clears, giving up after 1 s. */
538 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
540 unsigned long start_time = jiffies;
545 ret = lan78xx_read_reg(dev, MII_ACC, &val);
546 if (unlikely(ret < 0))
549 if (!(val & MII_ACC_MII_BUSY_))
551 } while (!time_after(jiffies, start_time + HZ));
/* Compose a MII_ACC register value for a PHY register access:
 * PHY address + register index + read/write direction + BUSY to
 * start the transaction.
 */
556 static inline u32 mii_access(int id, int index, int read)
560 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
561 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
563 ret |= MII_ACC_MII_READ_;
565 ret |= MII_ACC_MII_WRITE_;
566 ret |= MII_ACC_MII_BUSY_;
/* Poll E2P_CMD until the current EEPROM operation completes: exits
 * when BUSY clears or the controller reports TIMEOUT, with a 1 s
 * software deadline on top. Warns and fails if still busy/timed out.
 */
571 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
573 unsigned long start_time = jiffies;
578 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
579 if (unlikely(ret < 0))
582 if (!(val & E2P_CMD_EPC_BUSY_) ||
583 (val & E2P_CMD_EPC_TIMEOUT_))
585 usleep_range(40, 100);
586 } while (!time_after(jiffies, start_time + HZ));
588 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
589 netdev_warn(dev->net, "EEPROM read operation timeout");
/* Verify the EEPROM controller is idle before starting a new
 * operation: poll E2P_CMD for up to 1 s waiting for BUSY to clear.
 */
596 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
598 unsigned long start_time = jiffies;
603 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
604 if (unlikely(ret < 0))
607 if (!(val & E2P_CMD_EPC_BUSY_))
610 usleep_range(40, 100);
611 } while (!time_after(jiffies, start_time + HZ));
613 netdev_warn(dev->net, "EEPROM is busy");
/* Read @length bytes from the external EEPROM starting at @offset,
 * one byte per E2P READ command. On LAN7800 the EEPROM pins are muxed
 * with the LED pins, so LED0/LED1 are disabled in HW_CFG for the
 * duration and the saved value restored afterwards.
 */
617 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
618 u32 length, u8 *data)
625 /* depends on chip, some EEPROM pins are muxed with LED function.
626 * disable & restore LED function to access EEPROM.
628 ret = lan78xx_read_reg(dev, HW_CFG, &val);
630 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
631 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
632 ret = lan78xx_write_reg(dev, HW_CFG, val);
635 retval = lan78xx_eeprom_confirm_not_busy(dev);
639 for (i = 0; i < length; i++) {
/* Kick off a single-byte READ at the current offset. */
640 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
641 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
642 ret = lan78xx_write_reg(dev, E2P_CMD, val);
643 if (unlikely(ret < 0)) {
648 retval = lan78xx_wait_eeprom(dev);
652 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
653 if (unlikely(ret < 0)) {
/* Only the low byte of E2P_DATA carries the EEPROM byte. */
658 data[i] = val & 0xFF;
/* Restore the original LED configuration on LAN7800. */
664 if (dev->chipid == ID_REV_CHIP_ID_7800_)
665 ret = lan78xx_write_reg(dev, HW_CFG, saved);
/* Read from the EEPROM only if a valid one is present: byte 0 must
 * equal EEPROM_INDICATOR (0xA5) before the real read is attempted.
 */
670 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
671 u32 length, u8 *data)
676 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
677 if ((ret == 0) && (sig == EEPROM_INDICATOR))
678 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
/* Write @length bytes to the external EEPROM at @offset: issue a
 * write-enable (EWEN) once, then per byte fill E2P_DATA and issue a
 * WRITE command, waiting for completion each time. Same LAN7800
 * LED-pin mux save/restore dance as lan78xx_read_raw_eeprom().
 */
685 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
686 u32 length, u8 *data)
693 /* depends on chip, some EEPROM pins are muxed with LED function.
694 * disable & restore LED function to access EEPROM.
696 ret = lan78xx_read_reg(dev, HW_CFG, &val);
698 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
699 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
700 ret = lan78xx_write_reg(dev, HW_CFG, val);
703 retval = lan78xx_eeprom_confirm_not_busy(dev);
707 /* Issue write/erase enable command */
708 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
709 ret = lan78xx_write_reg(dev, E2P_CMD, val);
710 if (unlikely(ret < 0)) {
715 retval = lan78xx_wait_eeprom(dev);
719 for (i = 0; i < length; i++) {
720 /* Fill data register */
722 ret = lan78xx_write_reg(dev, E2P_DATA, val);
728 /* Send "write" command */
729 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
730 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
731 ret = lan78xx_write_reg(dev, E2P_CMD, val);
737 retval = lan78xx_wait_eeprom(dev);
/* Restore the original LED configuration on LAN7800. */
746 if (dev->chipid == ID_REV_CHIP_ID_7800_)
747 ret = lan78xx_write_reg(dev, HW_CFG, saved);
/* Read @length bytes from on-chip OTP memory starting at @offset.
 * Powers the OTP block up first (clears OTP_PWR_DN and waits), then
 * for each byte programs the split address registers, issues a READ
 * via OTP_FUNC_CMD/OTP_CMD_GO and polls OTP_STATUS until not busy.
 * Each poll loop has a 1 s deadline.
 */
752 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
753 u32 length, u8 *data)
758 unsigned long timeout;
760 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
762 if (buf & OTP_PWR_DN_PWRDN_N_) {
763 /* clear it and wait to be cleared */
764 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
766 timeout = jiffies + HZ;
769 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
770 if (time_after(jiffies, timeout)) {
771 netdev_warn(dev->net,
772 "timeout on OTP_PWR_DN");
775 } while (buf & OTP_PWR_DN_PWRDN_N_);
778 for (i = 0; i < length; i++) {
/* OTP address is split: high bits in ADDR1, low bits in ADDR2. */
779 ret = lan78xx_write_reg(dev, OTP_ADDR1,
780 ((offset + i) >> 8) & OTP_ADDR1_15_11);
781 ret = lan78xx_write_reg(dev, OTP_ADDR2,
782 ((offset + i) & OTP_ADDR2_10_3));
784 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
785 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
787 timeout = jiffies + HZ;
790 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
791 if (time_after(jiffies, timeout)) {
792 netdev_warn(dev->net,
793 "timeout on OTP_STATUS");
796 } while (buf & OTP_STATUS_BUSY_);
798 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
800 data[i] = (u8)(buf & 0xFF);
/* Program @length bytes into on-chip OTP memory at @offset. Same
 * power-up sequence as lan78xx_read_raw_otp(), then selects BYTE
 * program mode and per byte writes address + data and issues a
 * program/verify command, polling OTP_STATUS for completion.
 * NOTE: OTP is one-time programmable -- bits can only be burned once.
 */
806 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
807 u32 length, u8 *data)
812 unsigned long timeout;
814 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
816 if (buf & OTP_PWR_DN_PWRDN_N_) {
817 /* clear it and wait to be cleared */
818 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
820 timeout = jiffies + HZ;
823 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
824 if (time_after(jiffies, timeout)) {
825 netdev_warn(dev->net,
826 "timeout on OTP_PWR_DN completion");
829 } while (buf & OTP_PWR_DN_PWRDN_N_);
832 /* set to BYTE program mode */
833 ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
835 for (i = 0; i < length; i++) {
836 ret = lan78xx_write_reg(dev, OTP_ADDR1,
837 ((offset + i) >> 8) & OTP_ADDR1_15_11);
838 ret = lan78xx_write_reg(dev, OTP_ADDR2,
839 ((offset + i) & OTP_ADDR2_10_3));
840 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
841 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
842 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
844 timeout = jiffies + HZ;
847 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
848 if (time_after(jiffies, timeout)) {
849 netdev_warn(dev->net,
850 "Timeout on OTP_STATUS completion");
853 } while (buf & OTP_STATUS_BUSY_);
/* Read from OTP after validating the image indicator byte. OTP has
 * two banks: indicator 0xF3 (bank 1) is read as-is, indicator 0xF7
 * (bank 2) presumably shifts the read to the second image -- the
 * adjustment line is not visible here, TODO confirm. Any other
 * indicator means no valid OTP image.
 */
859 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
860 u32 length, u8 *data)
865 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
868 if (sig == OTP_INDICATOR_2)
870 else if (sig != OTP_INDICATOR_1)
873 ret = lan78xx_read_raw_otp(dev, offset, length, data);
/* Poll DP_SEL up to 100 times for the dataport-ready (DPRDY) bit;
 * warns and fails if the dataport never becomes ready.
 */
879 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
883 for (i = 0; i < 100; i++) {
886 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
887 if (unlikely(ret < 0))
890 if (dp_sel & DP_SEL_DPRDY_)
893 usleep_range(40, 100);
896 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
/* Write @length words from @buf into internal dataport RAM:
 * select the target RAM bank via DP_SEL, then per word program
 * DP_ADDR/DP_DATA and issue a WRITE command, waiting for ready
 * between words. Serialized by pdata->dataport_mutex and holds an
 * autopm reference for the duration of the USB traffic.
 */
901 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
902 u32 addr, u32 length, u32 *buf)
904 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
908 if (usb_autopm_get_interface(dev->intf) < 0)
911 mutex_lock(&pdata->dataport_mutex);
913 ret = lan78xx_dataport_wait_not_busy(dev);
917 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
/* Select the requested RAM bank, preserving the other DP_SEL bits. */
919 dp_sel &= ~DP_SEL_RSEL_MASK_;
920 dp_sel |= ram_select;
921 ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
923 for (i = 0; i < length; i++) {
924 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
926 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
928 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
930 ret = lan78xx_dataport_wait_not_busy(dev);
936 mutex_unlock(&pdata->dataport_mutex);
937 usb_autopm_put_interface(dev->intf);
/* Store a MAC address into the shadow perfect-filter (MAF) table at
 * @index, packed as the hardware expects: the low register gets the
 * last four address bytes, the high register the first two plus the
 * VALID and TYPE_DST flags. Index 0 is skipped -- it holds the
 * device's own address.
 */
942 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
943 int index, u8 addr[ETH_ALEN])
947 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
949 temp = addr[2] | (temp << 8);
950 temp = addr[1] | (temp << 8);
951 temp = addr[0] | (temp << 8);
952 pdata->pfilter_table[index][1] = temp;
954 temp = addr[4] | (temp << 8);
955 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
956 pdata->pfilter_table[index][0] = temp;
960 /* returns hash bit number for given MAC address */
/* Top 9 bits of the Ethernet CRC index the 512-entry multicast
 * hash-filter bitmap.
 */
961 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
963 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
/* Work handler: push the shadow multicast hash table and perfect
 * filters to hardware. Runs in process context because the register
 * writes go over USB and may sleep. Each MAF entry's HI register is
 * cleared first so the filter is never transiently half-valid.
 */
966 static void lan78xx_deferred_multicast_write(struct work_struct *param)
968 struct lan78xx_priv *pdata =
969 container_of(param, struct lan78xx_priv, set_multicast);
970 struct lan78xx_net *dev = pdata->dev;
974 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
977 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
978 DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
980 for (i = 1; i < NUM_OF_MAF; i++) {
981 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
982 ret = lan78xx_write_reg(dev, MAF_LO(i),
983 pdata->pfilter_table[i][1]);
984 ret = lan78xx_write_reg(dev, MAF_HI(i),
985 pdata->pfilter_table[i][0]);
988 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
/* ndo_set_rx_mode handler. Rebuilds the shadow RX-filter state
 * (rfe_ctl flags, multicast hash table, perfect filters) under the
 * rfe_ctl spinlock -- this runs in atomic context -- and then defers
 * the actual USB register writes to the set_multicast work item.
 */
991 static void lan78xx_set_multicast(struct net_device *netdev)
993 struct lan78xx_net *dev = netdev_priv(netdev);
994 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
998 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
/* Start from a clean slate: clear all filtering modes and tables. */
1000 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1001 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1003 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1004 pdata->mchash_table[i] = 0;
1005 /* pfilter_table[0] has own HW address */
1006 for (i = 1; i < NUM_OF_MAF; i++) {
1007 pdata->pfilter_table[i][0] =
1008 pdata->pfilter_table[i][1] = 0;
/* Broadcast is always accepted. */
1011 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1013 if (dev->net->flags & IFF_PROMISC) {
1014 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1015 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1017 if (dev->net->flags & IFF_ALLMULTI) {
1018 netif_dbg(dev, drv, dev->net,
1019 "receive all multicast enabled");
1020 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1024 if (netdev_mc_count(dev->net)) {
1025 struct netdev_hw_addr *ha;
1028 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1030 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1033 netdev_for_each_mc_addr(ha, netdev) {
1034 /* set first 32 into Perfect Filter */
1036 lan78xx_set_addr_filter(pdata, i, ha->addr);
/* Overflow beyond the perfect filters falls back to hashing. */
1038 u32 bitnum = lan78xx_hash(ha->addr);
1040 pdata->mchash_table[bitnum / 32] |=
1041 (1 << (bitnum % 32));
1042 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1048 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1050 /* defer register writes to a sleepable context */
1051 schedule_work(&pdata->set_multicast);
/* Program MAC flow control after autonegotiation: resolve the pause
 * capabilities (from the advertisement registers when fc_autoneg is
 * set, otherwise from the user-requested fc_request_control), then
 * write the FIFO flow thresholds (FCT_FLOW) before enabling pause in
 * the FLOW register -- threshold must be valid before enable.
 * @duplex/@lcladv/@rmtadv: resolved duplex and MII advertisement words.
 */
1054 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1055 u16 lcladv, u16 rmtadv)
1057 u32 flow = 0, fct_flow = 0;
1061 if (dev->fc_autoneg)
1062 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1064 cap = dev->fc_request_control;
1066 if (cap & FLOW_CTRL_TX)
/* 0xFFFF = maximum pause time in the generated pause frames. */
1067 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1069 if (cap & FLOW_CTRL_RX)
1070 flow |= FLOW_CR_RX_FCEN_;
/* FIFO thresholds differ by USB link speed (values elided here). */
1072 if (dev->udev->speed == USB_SPEED_SUPER)
1074 else if (dev->udev->speed == USB_SPEED_HIGH)
1077 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1078 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1079 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1081 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1083 /* threshold value should be set before enabling flow */
1084 ret = lan78xx_write_reg(dev, FLOW, flow);
/* Handle a PHY link change (EVENT_LINK_RESET). Clears interrupt
 * status on both the PHY and the MAC, then on link-down stops the
 * stat timer and signals the PHY MAC-interrupt machinery; on link-up
 * reads the negotiated settings, tunes USB U1/U2 power states on
 * SuperSpeed links, programs flow control, and (re)arms the
 * statistics timer and the bottom half.
 */
1089 static int lan78xx_link_reset(struct lan78xx_net *dev)
1091 struct phy_device *phydev = dev->net->phydev;
1092 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
1093 int ladv, radv, ret;
1096 /* clear PHY interrupt status */
1097 ret = phy_read(phydev, LAN88XX_INT_STS);
1098 if (unlikely(ret < 0))
1101 /* clear LAN78xx interrupt status */
1102 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1103 if (unlikely(ret < 0))
1106 phy_read_status(phydev);
1108 if (!phydev->link && dev->link_on) {
1109 dev->link_on = false;
/* Link went down: reset the MAC (details elided in this view). */
1112 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1113 if (unlikely(ret < 0))
1116 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1117 if (unlikely(ret < 0))
1120 phy_mac_interrupt(phydev, 0);
1122 del_timer(&dev->stat_monitor);
1123 } else if (phydev->link && !dev->link_on) {
1124 dev->link_on = true;
1126 phy_ethtool_gset(phydev, &ecmd);
1128 ret = phy_read(phydev, LAN88XX_INT_STS);
1130 if (dev->udev->speed == USB_SPEED_SUPER) {
1131 if (ethtool_cmd_speed(&ecmd) == 1000) {
/* At gigabit, disable U2 and enable only U1 (U2 exit latency
 * is too costly); below gigabit both U1 and U2 are enabled.
 */
1133 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1134 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1135 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1137 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1138 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1139 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1141 /* enable U1 & U2 */
1142 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1143 buf |= USB_CFG1_DEV_U2_INIT_EN_;
1144 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1145 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1149 ladv = phy_read(phydev, MII_ADVERTISE);
1153 radv = phy_read(phydev, MII_LPA);
1157 netif_dbg(dev, link, dev->net,
1158 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1159 ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);
1161 ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
1162 phy_mac_interrupt(phydev, 1);
1164 if (!timer_pending(&dev->stat_monitor)) {
1166 mod_timer(&dev->stat_monitor,
1167 jiffies + STAT_UPDATE_TIMER);
1170 tasklet_schedule(&dev->bh);
1176 /* some work can't be done in tasklets, so we use keventd
1178 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1179 * but tasklet_schedule() doesn't. hope the failure is rare.
/* Record an EVENT_* bit in dev->flags and kick the deferred-work
 * handler; the flag survives even if the work was already queued.
 */
1181 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1183 set_bit(work, &dev->flags);
1184 if (!schedule_delayed_work(&dev->wq, 0))
1185 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
/* Interrupt-endpoint URB completion: parse the 4-byte little-endian
 * status word; a PHY interrupt defers link handling to
 * EVENT_LINK_RESET, anything else is logged as unexpected.
 */
1188 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1192 if (urb->actual_length != 4) {
1193 netdev_warn(dev->net,
1194 "unexpected urb length %d", urb->actual_length);
1198 memcpy(&intdata, urb->transfer_buffer, 4);
1199 le32_to_cpus(&intdata);
1201 if (intdata & INT_ENP_PHY_INT) {
1202 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1203 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1205 netdev_warn(dev->net,
1206 "unexpected interrupt: 0x%08x\n", intdata);
/* ethtool get_eeprom_len: fixed 512-byte EEPROM size. */
1209 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1211 return MAX_EEPROM_SIZE;
/* ethtool get_eeprom: raw read (no indicator check) so users can dump
 * an unprogrammed EEPROM too; magic identifies the image type.
 */
1214 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1215 struct ethtool_eeprom *ee, u8 *data)
1217 struct lan78xx_net *dev = netdev_priv(netdev);
1219 ee->magic = LAN78XX_EEPROM_MAGIC;
1221 return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
/* ethtool set_eeprom: only whole-image updates are accepted
 * (offset 0, correct first-byte indicator). The magic selects the
 * target: EEPROM magic -> EEPROM write, OTP magic -> OTP program.
 */
1224 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1225 struct ethtool_eeprom *ee, u8 *data)
1227 struct lan78xx_net *dev = netdev_priv(netdev);
1229 /* Allow entire eeprom update only */
1230 if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
1231 (ee->offset == 0) &&
1233 (data[0] == EEPROM_INDICATOR))
1234 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1235 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1236 (ee->offset == 0) &&
1238 (data[0] == OTP_INDICATOR_1))
1239 return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
/* ethtool get_strings: copy out the statistic name table. */
1244 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1247 if (stringset == ETH_SS_STATS)
1248 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
/* ethtool get_sset_count: number of statistic strings. */
1251 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1253 if (sset == ETH_SS_STATS)
1254 return ARRAY_SIZE(lan78xx_gstrings);
/* ethtool get_ethtool_stats: refresh the accumulated counters from
 * hardware, then copy them out under the stats lock. Relies on
 * curr_stat's member order matching lan78xx_gstrings.
 */
1259 static void lan78xx_get_stats(struct net_device *netdev,
1260 struct ethtool_stats *stats, u64 *data)
1262 struct lan78xx_net *dev = netdev_priv(netdev);
1264 lan78xx_update_stats(dev);
1266 mutex_lock(&dev->stats.access_lock);
1267 memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1268 mutex_unlock(&dev->stats.access_lock);
/* ethtool get_wol: report wake-on-LAN capability/configuration.
 * Only advertises WoL when USB_CFG0 says remote wakeup is available;
 * the active options come from the driver's shadow pdata->wol.
 */
1271 static void lan78xx_get_wol(struct net_device *netdev,
1272 struct ethtool_wolinfo *wol)
1274 struct lan78xx_net *dev = netdev_priv(netdev);
1277 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1279 if (usb_autopm_get_interface(dev->intf) < 0)
1282 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1283 if (unlikely(ret < 0)) {
1287 if (buf & USB_CFG_RMT_WKP_) {
1288 wol->supported = WAKE_ALL;
1289 wol->wolopts = pdata->wol;
1296 usb_autopm_put_interface(dev->intf);
/* ethtool set_wol: reject unsupported wake flags, record the chosen
 * options in the driver shadow, propagate the enable state to the USB
 * core's wakeup flag and forward the request to the PHY.
 */
1299 static int lan78xx_set_wol(struct net_device *netdev,
1300 struct ethtool_wolinfo *wol)
1302 struct lan78xx_net *dev = netdev_priv(netdev);
1303 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1306 ret = usb_autopm_get_interface(dev->intf);
1310 if (wol->wolopts & ~WAKE_ALL)
1313 pdata->wol = wol->wolopts;
1315 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1317 phy_ethtool_set_wol(netdev->phydev, wol);
1319 usb_autopm_put_interface(dev->intf);
/* ethtool get_eee: query Energy-Efficient-Ethernet state. PHY-level
 * advertisement comes from phy_ethtool_get_eee(); MAC-level enable is
 * read from MAC_CR, and when enabled the TX LPI request delay
 * register is reported as tx_lpi_timer (both in microseconds).
 */
1324 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1326 struct lan78xx_net *dev = netdev_priv(net);
1327 struct phy_device *phydev = net->phydev;
1331 ret = usb_autopm_get_interface(dev->intf);
1335 ret = phy_ethtool_get_eee(phydev, edata);
1339 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1340 if (buf & MAC_CR_EEE_EN_) {
1341 edata->eee_enabled = true;
/* Active only if both link partners advertise a common EEE mode. */
1342 edata->eee_active = !!(edata->advertised &
1343 edata->lp_advertised);
1344 edata->tx_lpi_enabled = true;
1345 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1346 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1347 edata->tx_lpi_timer = buf;
1349 edata->eee_enabled = false;
1350 edata->eee_active = false;
1351 edata->tx_lpi_enabled = false;
1352 edata->tx_lpi_timer = 0;
1357 usb_autopm_put_interface(dev->intf);
/* ethtool set_eee: enable path sets MAC_CR_EEE_EN_, pushes the PHY
 * advertisement and programs the TX LPI request delay; disable path
 * just clears the MAC enable bit.
 */
1362 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1364 struct lan78xx_net *dev = netdev_priv(net);
1368 ret = usb_autopm_get_interface(dev->intf);
1372 if (edata->eee_enabled) {
1373 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1374 buf |= MAC_CR_EEE_EN_;
1375 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1377 phy_ethtool_set_eee(net->phydev, edata);
1379 buf = (u32)edata->tx_lpi_timer;
1380 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1382 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1383 buf &= ~MAC_CR_EEE_EN_;
1384 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1387 usb_autopm_put_interface(dev->intf);
1392 static u32 lan78xx_get_link(struct net_device *net)
1394 phy_read_status(net->phydev);
1396 return net->phydev->link;
1399 static int lan78xx_nway_reset(struct net_device *net)
1401 return phy_start_aneg(net->phydev);
1404 static void lan78xx_get_drvinfo(struct net_device *net,
1405 struct ethtool_drvinfo *info)
1407 struct lan78xx_net *dev = netdev_priv(net);
1409 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1410 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1411 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1414 static u32 lan78xx_get_msglevel(struct net_device *net)
1416 struct lan78xx_net *dev = netdev_priv(net);
1418 return dev->msg_enable;
1421 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1423 struct lan78xx_net *dev = netdev_priv(net);
1425 dev->msg_enable = level;
1428 static int lan78xx_get_mdix_status(struct net_device *net)
1430 struct phy_device *phydev = net->phydev;
1433 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
1434 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1435 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
1440 static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1442 struct lan78xx_net *dev = netdev_priv(net);
1443 struct phy_device *phydev = net->phydev;
1446 if (mdix_ctrl == ETH_TP_MDI) {
1447 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1448 LAN88XX_EXT_PAGE_SPACE_1);
1449 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1450 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1451 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1452 buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1453 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1454 LAN88XX_EXT_PAGE_SPACE_0);
1455 } else if (mdix_ctrl == ETH_TP_MDI_X) {
1456 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1457 LAN88XX_EXT_PAGE_SPACE_1);
1458 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1459 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1460 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1461 buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1462 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1463 LAN88XX_EXT_PAGE_SPACE_0);
1464 } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1465 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1466 LAN88XX_EXT_PAGE_SPACE_1);
1467 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1468 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1469 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1470 buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1471 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1472 LAN88XX_EXT_PAGE_SPACE_0);
1474 dev->mdix_ctrl = mdix_ctrl;
1477 static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
1479 struct lan78xx_net *dev = netdev_priv(net);
1480 struct phy_device *phydev = net->phydev;
1484 ret = usb_autopm_get_interface(dev->intf);
1488 ret = phy_ethtool_gset(phydev, cmd);
1490 buf = lan78xx_get_mdix_status(net);
1492 buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1493 if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
1494 cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
1495 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
1496 } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
1497 cmd->eth_tp_mdix = ETH_TP_MDI;
1498 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
1499 } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
1500 cmd->eth_tp_mdix = ETH_TP_MDI_X;
1501 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
1504 usb_autopm_put_interface(dev->intf);
1509 static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
1511 struct lan78xx_net *dev = netdev_priv(net);
1512 struct phy_device *phydev = net->phydev;
1516 ret = usb_autopm_get_interface(dev->intf);
1520 if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
1521 lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
1524 /* change speed & duplex */
1525 ret = phy_ethtool_sset(phydev, cmd);
1527 if (!cmd->autoneg) {
1528 /* force link down */
1529 temp = phy_read(phydev, MII_BMCR);
1530 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1532 phy_write(phydev, MII_BMCR, temp);
1535 usb_autopm_put_interface(dev->intf);
1540 static void lan78xx_get_pause(struct net_device *net,
1541 struct ethtool_pauseparam *pause)
1543 struct lan78xx_net *dev = netdev_priv(net);
1544 struct phy_device *phydev = net->phydev;
1545 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
1547 phy_ethtool_gset(phydev, &ecmd);
1549 pause->autoneg = dev->fc_autoneg;
1551 if (dev->fc_request_control & FLOW_CTRL_TX)
1552 pause->tx_pause = 1;
1554 if (dev->fc_request_control & FLOW_CTRL_RX)
1555 pause->rx_pause = 1;
1558 static int lan78xx_set_pause(struct net_device *net,
1559 struct ethtool_pauseparam *pause)
1561 struct lan78xx_net *dev = netdev_priv(net);
1562 struct phy_device *phydev = net->phydev;
1563 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
1566 phy_ethtool_gset(phydev, &ecmd);
1568 if (pause->autoneg && !ecmd.autoneg) {
1573 dev->fc_request_control = 0;
1574 if (pause->rx_pause)
1575 dev->fc_request_control |= FLOW_CTRL_RX;
1577 if (pause->tx_pause)
1578 dev->fc_request_control |= FLOW_CTRL_TX;
1583 ecmd.advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1584 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1585 ecmd.advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1586 phy_ethtool_sset(phydev, &ecmd);
1589 dev->fc_autoneg = pause->autoneg;
1596 static const struct ethtool_ops lan78xx_ethtool_ops = {
1597 .get_link = lan78xx_get_link,
1598 .nway_reset = lan78xx_nway_reset,
1599 .get_drvinfo = lan78xx_get_drvinfo,
1600 .get_msglevel = lan78xx_get_msglevel,
1601 .set_msglevel = lan78xx_set_msglevel,
1602 .get_settings = lan78xx_get_settings,
1603 .set_settings = lan78xx_set_settings,
1604 .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1605 .get_eeprom = lan78xx_ethtool_get_eeprom,
1606 .set_eeprom = lan78xx_ethtool_set_eeprom,
1607 .get_ethtool_stats = lan78xx_get_stats,
1608 .get_sset_count = lan78xx_get_sset_count,
1609 .get_strings = lan78xx_get_strings,
1610 .get_wol = lan78xx_get_wol,
1611 .set_wol = lan78xx_set_wol,
1612 .get_eee = lan78xx_get_eee,
1613 .set_eee = lan78xx_set_eee,
1614 .get_pauseparam = lan78xx_get_pause,
1615 .set_pauseparam = lan78xx_set_pause,
1618 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1620 if (!netif_running(netdev))
1623 return phy_mii_ioctl(netdev->phydev, rq, cmd);
1626 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1628 u32 addr_lo, addr_hi;
1632 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1633 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1635 addr[0] = addr_lo & 0xFF;
1636 addr[1] = (addr_lo >> 8) & 0xFF;
1637 addr[2] = (addr_lo >> 16) & 0xFF;
1638 addr[3] = (addr_lo >> 24) & 0xFF;
1639 addr[4] = addr_hi & 0xFF;
1640 addr[5] = (addr_hi >> 8) & 0xFF;
1642 if (!is_valid_ether_addr(addr)) {
1643 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1644 /* valid address present in Device Tree */
1645 netif_dbg(dev, ifup, dev->net,
1646 "MAC address read from Device Tree");
1647 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1648 ETH_ALEN, addr) == 0) ||
1649 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1650 ETH_ALEN, addr) == 0)) &&
1651 is_valid_ether_addr(addr)) {
1652 /* eeprom values are valid so use them */
1653 netif_dbg(dev, ifup, dev->net,
1654 "MAC address read from EEPROM");
1656 /* generate random MAC */
1657 random_ether_addr(addr);
1658 netif_dbg(dev, ifup, dev->net,
1659 "MAC address set to random addr");
1662 addr_lo = addr[0] | (addr[1] << 8) |
1663 (addr[2] << 16) | (addr[3] << 24);
1664 addr_hi = addr[4] | (addr[5] << 8);
1666 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1667 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1670 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1671 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1673 ether_addr_copy(dev->net->dev_addr, addr);
1676 /* MDIO read and write wrappers for phylib */
1677 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1679 struct lan78xx_net *dev = bus->priv;
1683 ret = usb_autopm_get_interface(dev->intf);
1687 mutex_lock(&dev->phy_mutex);
1689 /* confirm MII not busy */
1690 ret = lan78xx_phy_wait_not_busy(dev);
1694 /* set the address, index & direction (read from PHY) */
1695 addr = mii_access(phy_id, idx, MII_READ);
1696 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1698 ret = lan78xx_phy_wait_not_busy(dev);
1702 ret = lan78xx_read_reg(dev, MII_DATA, &val);
1704 ret = (int)(val & 0xFFFF);
1707 mutex_unlock(&dev->phy_mutex);
1708 usb_autopm_put_interface(dev->intf);
1712 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1715 struct lan78xx_net *dev = bus->priv;
1719 ret = usb_autopm_get_interface(dev->intf);
1723 mutex_lock(&dev->phy_mutex);
1725 /* confirm MII not busy */
1726 ret = lan78xx_phy_wait_not_busy(dev);
1731 ret = lan78xx_write_reg(dev, MII_DATA, val);
1733 /* set the address, index & direction (write to PHY) */
1734 addr = mii_access(phy_id, idx, MII_WRITE);
1735 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1737 ret = lan78xx_phy_wait_not_busy(dev);
1742 mutex_unlock(&dev->phy_mutex);
1743 usb_autopm_put_interface(dev->intf);
1747 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1751 dev->mdiobus = mdiobus_alloc();
1752 if (!dev->mdiobus) {
1753 netdev_err(dev->net, "can't allocate MDIO bus\n");
1757 dev->mdiobus->priv = (void *)dev;
1758 dev->mdiobus->read = lan78xx_mdiobus_read;
1759 dev->mdiobus->write = lan78xx_mdiobus_write;
1760 dev->mdiobus->name = "lan78xx-mdiobus";
1761 dev->mdiobus->parent = &dev->udev->dev;
1763 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1764 dev->udev->bus->busnum, dev->udev->devnum);
1766 switch (dev->chipid) {
1767 case ID_REV_CHIP_ID_7800_:
1768 case ID_REV_CHIP_ID_7850_:
1769 /* set to internal PHY id */
1770 dev->mdiobus->phy_mask = ~(1 << 1);
1774 ret = mdiobus_register(dev->mdiobus);
1776 netdev_err(dev->net, "can't register MDIO bus\n");
1780 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1783 mdiobus_free(dev->mdiobus);
1787 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1789 mdiobus_unregister(dev->mdiobus);
1790 mdiobus_free(dev->mdiobus);
1793 static void lan78xx_link_status_change(struct net_device *net)
1795 struct phy_device *phydev = net->phydev;
1798 /* At forced 100 F/H mode, chip may fail to set mode correctly
1799 * when cable is switched between long(~50+m) and short one.
1800 * As workaround, set to 10 before setting to 100
1801 * at forced 100 F/H mode.
1803 if (!phydev->autoneg && (phydev->speed == 100)) {
1804 /* disable phy interrupt */
1805 temp = phy_read(phydev, LAN88XX_INT_MASK);
1806 temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1807 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1809 temp = phy_read(phydev, MII_BMCR);
1810 temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1811 phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1812 temp |= BMCR_SPEED100;
1813 phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1815 /* clear pending interrupt generated while workaround */
1816 temp = phy_read(phydev, LAN88XX_INT_STS);
1818 /* enable phy interrupt back */
1819 temp = phy_read(phydev, LAN88XX_INT_MASK);
1820 temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1821 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1825 static int lan78xx_phy_init(struct lan78xx_net *dev)
1829 struct phy_device *phydev = dev->net->phydev;
1831 phydev = phy_find_first(dev->mdiobus);
1833 netdev_err(dev->net, "no PHY found\n");
1837 /* Enable PHY interrupts.
1838 * We handle our own interrupt
1840 ret = phy_read(phydev, LAN88XX_INT_STS);
1841 ret = phy_write(phydev, LAN88XX_INT_MASK,
1842 LAN88XX_INT_MASK_MDINTPIN_EN_ |
1843 LAN88XX_INT_MASK_LINK_CHANGE_);
1845 phydev->irq = PHY_IGNORE_INTERRUPT;
1847 ret = phy_connect_direct(dev->net, phydev,
1848 lan78xx_link_status_change,
1849 PHY_INTERFACE_MODE_GMII);
1851 netdev_err(dev->net, "can't attach PHY to %s\n",
1856 /* set to AUTOMDIX */
1857 lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
1859 /* MAC doesn't support 1000T Half */
1860 phydev->supported &= ~SUPPORTED_1000baseT_Half;
1862 /* support both flow controls */
1863 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
1864 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1865 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1866 phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1868 genphy_config_aneg(phydev);
1870 dev->fc_autoneg = phydev->autoneg;
1874 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
1879 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
1885 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1887 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
1890 buf &= ~MAC_RX_RXEN_;
1891 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1894 /* add 4 to size for FCS */
1895 buf &= ~MAC_RX_MAX_SIZE_MASK_;
1896 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1898 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1901 buf |= MAC_RX_RXEN_;
1902 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1908 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
1910 struct sk_buff *skb;
1911 unsigned long flags;
1914 spin_lock_irqsave(&q->lock, flags);
1915 while (!skb_queue_empty(q)) {
1916 struct skb_data *entry;
1920 skb_queue_walk(q, skb) {
1921 entry = (struct skb_data *)skb->cb;
1922 if (entry->state != unlink_start)
1927 entry->state = unlink_start;
1930 /* Get reference count of the URB to avoid it to be
1931 * freed during usb_unlink_urb, which may trigger
1932 * use-after-free problem inside usb_unlink_urb since
1933 * usb_unlink_urb is always racing with .complete
1934 * handler(include defer_bh).
1937 spin_unlock_irqrestore(&q->lock, flags);
1938 /* during some PM-driven resume scenarios,
1939 * these (async) unlinks complete immediately
1941 ret = usb_unlink_urb(urb);
1942 if (ret != -EINPROGRESS && ret != 0)
1943 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
1947 spin_lock_irqsave(&q->lock, flags);
1949 spin_unlock_irqrestore(&q->lock, flags);
1953 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
1955 struct lan78xx_net *dev = netdev_priv(netdev);
1956 int ll_mtu = new_mtu + netdev->hard_header_len;
1957 int old_hard_mtu = dev->hard_mtu;
1958 int old_rx_urb_size = dev->rx_urb_size;
1961 if (new_mtu > MAX_SINGLE_PACKET_SIZE)
1966 /* no second zero-length packet read wanted after mtu-sized packets */
1967 if ((ll_mtu % dev->maxpacket) == 0)
1970 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
1972 netdev->mtu = new_mtu;
1974 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
1975 if (dev->rx_urb_size == old_hard_mtu) {
1976 dev->rx_urb_size = dev->hard_mtu;
1977 if (dev->rx_urb_size > old_rx_urb_size) {
1978 if (netif_running(dev->net)) {
1979 unlink_urbs(dev, &dev->rxq);
1980 tasklet_schedule(&dev->bh);
1988 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
1990 struct lan78xx_net *dev = netdev_priv(netdev);
1991 struct sockaddr *addr = p;
1992 u32 addr_lo, addr_hi;
1995 if (netif_running(netdev))
1998 if (!is_valid_ether_addr(addr->sa_data))
1999 return -EADDRNOTAVAIL;
2001 ether_addr_copy(netdev->dev_addr, addr->sa_data);
2003 addr_lo = netdev->dev_addr[0] |
2004 netdev->dev_addr[1] << 8 |
2005 netdev->dev_addr[2] << 16 |
2006 netdev->dev_addr[3] << 24;
2007 addr_hi = netdev->dev_addr[4] |
2008 netdev->dev_addr[5] << 8;
2010 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2011 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2013 /* Added to support MAC address changes */
2014 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2015 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2020 /* Enable or disable Rx checksum offload engine */
2021 static int lan78xx_set_features(struct net_device *netdev,
2022 netdev_features_t features)
2024 struct lan78xx_net *dev = netdev_priv(netdev);
2025 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2026 unsigned long flags;
2029 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2031 if (features & NETIF_F_RXCSUM) {
2032 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2033 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2035 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2036 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2039 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2040 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2042 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2044 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2046 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2051 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2053 struct lan78xx_priv *pdata =
2054 container_of(param, struct lan78xx_priv, set_vlan);
2055 struct lan78xx_net *dev = pdata->dev;
2057 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2058 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2061 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2062 __be16 proto, u16 vid)
2064 struct lan78xx_net *dev = netdev_priv(netdev);
2065 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2067 u16 vid_dword_index;
2069 vid_dword_index = (vid >> 5) & 0x7F;
2070 vid_bit_index = vid & 0x1F;
2072 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2074 /* defer register writes to a sleepable context */
2075 schedule_work(&pdata->set_vlan);
2080 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2081 __be16 proto, u16 vid)
2083 struct lan78xx_net *dev = netdev_priv(netdev);
2084 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2086 u16 vid_dword_index;
2088 vid_dword_index = (vid >> 5) & 0x7F;
2089 vid_bit_index = vid & 0x1F;
2091 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2093 /* defer register writes to a sleepable context */
2094 schedule_work(&pdata->set_vlan);
2099 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2103 u32 regs[6] = { 0 };
2105 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2106 if (buf & USB_CFG1_LTM_ENABLE_) {
2108 /* Get values from EEPROM first */
2109 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2110 if (temp[0] == 24) {
2111 ret = lan78xx_read_raw_eeprom(dev,
2118 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2119 if (temp[0] == 24) {
2120 ret = lan78xx_read_raw_otp(dev,
2130 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2131 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2132 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2133 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2134 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2135 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2138 static int lan78xx_reset(struct lan78xx_net *dev)
2140 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2143 unsigned long timeout;
2145 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2146 buf |= HW_CFG_LRST_;
2147 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2149 timeout = jiffies + HZ;
2152 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2153 if (time_after(jiffies, timeout)) {
2154 netdev_warn(dev->net,
2155 "timeout on completion of LiteReset");
2158 } while (buf & HW_CFG_LRST_);
2160 lan78xx_init_mac_address(dev);
2162 /* save DEVID for later usage */
2163 ret = lan78xx_read_reg(dev, ID_REV, &buf);
2164 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2165 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2167 /* Respond to the IN token with a NAK */
2168 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2169 buf |= USB_CFG_BIR_;
2170 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2173 lan78xx_init_ltm(dev);
2175 dev->net->hard_header_len += TX_OVERHEAD;
2176 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2178 if (dev->udev->speed == USB_SPEED_SUPER) {
2179 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2180 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2183 } else if (dev->udev->speed == USB_SPEED_HIGH) {
2184 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2185 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2186 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2187 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2189 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2190 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2195 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2196 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2198 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2200 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2202 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2203 buf |= USB_CFG_BCE_;
2204 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2206 /* set FIFO sizes */
2207 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2208 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2210 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2211 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2213 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2214 ret = lan78xx_write_reg(dev, FLOW, 0);
2215 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2217 /* Don't need rfe_ctl_lock during initialisation */
2218 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2219 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2220 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2222 /* Enable or disable checksum offload engines */
2223 lan78xx_set_features(dev->net, dev->net->features);
2225 lan78xx_set_multicast(dev->net);
2228 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2229 buf |= PMT_CTL_PHY_RST_;
2230 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2232 timeout = jiffies + HZ;
2235 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2236 if (time_after(jiffies, timeout)) {
2237 netdev_warn(dev->net, "timeout waiting for PHY Reset");
2240 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2242 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2243 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2244 ret = lan78xx_write_reg(dev, MAC_CR, buf);
2246 /* enable PHY interrupts */
2247 ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2248 buf |= INT_ENP_PHY_INT;
2249 ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);
2251 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2252 buf |= MAC_TX_TXEN_;
2253 ret = lan78xx_write_reg(dev, MAC_TX, buf);
2255 ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2256 buf |= FCT_TX_CTL_EN_;
2257 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2259 ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
2261 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2262 buf |= MAC_RX_RXEN_;
2263 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2265 ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2266 buf |= FCT_RX_CTL_EN_;
2267 ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2272 static void lan78xx_init_stats(struct lan78xx_net *dev)
2277 /* initialize for stats update
2278 * some counters are 20bits and some are 32bits
2280 p = (u32 *)&dev->stats.rollover_max;
2281 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2284 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2285 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2286 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2287 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2288 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2289 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2290 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2291 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2292 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2293 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2295 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
2298 static int lan78xx_open(struct net_device *net)
2300 struct lan78xx_net *dev = netdev_priv(net);
2303 ret = usb_autopm_get_interface(dev->intf);
2307 ret = lan78xx_reset(dev);
2311 ret = lan78xx_phy_init(dev);
2315 /* for Link Check */
2316 if (dev->urb_intr) {
2317 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2319 netif_err(dev, ifup, dev->net,
2320 "intr submit %d\n", ret);
2325 lan78xx_init_stats(dev);
2327 set_bit(EVENT_DEV_OPEN, &dev->flags);
2329 netif_start_queue(net);
2331 dev->link_on = false;
2333 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2335 usb_autopm_put_interface(dev->intf);
2341 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2343 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2344 DECLARE_WAITQUEUE(wait, current);
2347 /* ensure there are no more active urbs */
2348 add_wait_queue(&unlink_wakeup, &wait);
2349 set_current_state(TASK_UNINTERRUPTIBLE);
2350 dev->wait = &unlink_wakeup;
2351 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2353 /* maybe wait for deletions to finish. */
2354 while (!skb_queue_empty(&dev->rxq) &&
2355 !skb_queue_empty(&dev->txq) &&
2356 !skb_queue_empty(&dev->done)) {
2357 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2358 set_current_state(TASK_UNINTERRUPTIBLE);
2359 netif_dbg(dev, ifdown, dev->net,
2360 "waited for %d urb completions\n", temp);
2362 set_current_state(TASK_RUNNING);
2364 remove_wait_queue(&unlink_wakeup, &wait);
2367 static int lan78xx_stop(struct net_device *net)
2369 struct lan78xx_net *dev = netdev_priv(net);
2371 if (timer_pending(&dev->stat_monitor))
2372 del_timer_sync(&dev->stat_monitor);
2374 phy_stop(net->phydev);
2375 phy_disconnect(net->phydev);
2378 clear_bit(EVENT_DEV_OPEN, &dev->flags);
2379 netif_stop_queue(net);
2381 netif_info(dev, ifdown, dev->net,
2382 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2383 net->stats.rx_packets, net->stats.tx_packets,
2384 net->stats.rx_errors, net->stats.tx_errors);
2386 lan78xx_terminate_urbs(dev);
2388 usb_kill_urb(dev->urb_intr);
2390 skb_queue_purge(&dev->rxq_pause);
2392 /* deferred work (task, timer, softirq) must also stop.
2393 * can't flush_scheduled_work() until we drop rtnl (later),
2394 * else workers could deadlock; so make workers a NOP.
2397 cancel_delayed_work_sync(&dev->wq);
2398 tasklet_kill(&dev->bh);
2400 usb_autopm_put_interface(dev->intf);
2405 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2406 struct sk_buff *skb, gfp_t flags)
2408 u32 tx_cmd_a, tx_cmd_b;
2410 if (skb_cow_head(skb, TX_OVERHEAD)) {
2411 dev_kfree_skb_any(skb);
2415 if (skb_linearize(skb)) {
2416 dev_kfree_skb_any(skb);
2420 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2422 if (skb->ip_summed == CHECKSUM_PARTIAL)
2423 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2426 if (skb_is_gso(skb)) {
2427 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2429 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2431 tx_cmd_a |= TX_CMD_A_LSO_;
2434 if (skb_vlan_tag_present(skb)) {
2435 tx_cmd_a |= TX_CMD_A_IVTG_;
2436 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2440 cpu_to_le32s(&tx_cmd_b);
2441 memcpy(skb->data, &tx_cmd_b, 4);
2444 cpu_to_le32s(&tx_cmd_a);
2445 memcpy(skb->data, &tx_cmd_a, 4);
2450 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2451 struct sk_buff_head *list, enum skb_state state)
2453 unsigned long flags;
2454 enum skb_state old_state;
2455 struct skb_data *entry = (struct skb_data *)skb->cb;
2457 spin_lock_irqsave(&list->lock, flags);
2458 old_state = entry->state;
2459 entry->state = state;
2461 __skb_unlink(skb, list);
2462 spin_unlock(&list->lock);
2463 spin_lock(&dev->done.lock);
2465 __skb_queue_tail(&dev->done, skb);
2466 if (skb_queue_len(&dev->done) == 1)
2467 tasklet_schedule(&dev->bh);
2468 spin_unlock_irqrestore(&dev->done.lock, flags);
2473 static void tx_complete(struct urb *urb)
2475 struct sk_buff *skb = (struct sk_buff *)urb->context;
2476 struct skb_data *entry = (struct skb_data *)skb->cb;
2477 struct lan78xx_net *dev = entry->dev;
2479 if (urb->status == 0) {
2480 dev->net->stats.tx_packets += entry->num_of_packet;
2481 dev->net->stats.tx_bytes += entry->length;
2483 dev->net->stats.tx_errors++;
2485 switch (urb->status) {
2487 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2490 /* software-driven interface shutdown */
2498 netif_stop_queue(dev->net);
2501 netif_dbg(dev, tx_err, dev->net,
2502 "tx err %d\n", entry->urb->status);
2507 usb_autopm_put_interface_async(dev->intf);
2509 defer_bh(dev, skb, &dev->txq, tx_done);
2512 static void lan78xx_queue_skb(struct sk_buff_head *list,
2513 struct sk_buff *newsk, enum skb_state state)
2515 struct skb_data *entry = (struct skb_data *)newsk->cb;
2517 __skb_queue_tail(list, newsk);
2518 entry->state = state;
2522 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2524 struct lan78xx_net *dev = netdev_priv(net);
2525 struct sk_buff *skb2 = NULL;
2528 skb_tx_timestamp(skb);
2529 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2533 skb_queue_tail(&dev->txq_pend, skb2);
2535 /* throttle TX patch at slower than SUPER SPEED USB */
2536 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2537 (skb_queue_len(&dev->txq_pend) > 10))
2538 netif_stop_queue(net);
2540 netif_dbg(dev, tx_err, dev->net,
2541 "lan78xx_tx_prep return NULL\n");
2542 dev->net->stats.tx_errors++;
2543 dev->net->stats.tx_dropped++;
2546 tasklet_schedule(&dev->bh);
2548 return NETDEV_TX_OK;
2551 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2553 struct lan78xx_priv *pdata = NULL;
2557 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2559 pdata = (struct lan78xx_priv *)(dev->data[0]);
2561 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2567 spin_lock_init(&pdata->rfe_ctl_lock);
2568 mutex_init(&pdata->dataport_mutex);
2570 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2572 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2573 pdata->vlan_table[i] = 0;
2575 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2577 dev->net->features = 0;
2579 if (DEFAULT_TX_CSUM_ENABLE)
2580 dev->net->features |= NETIF_F_HW_CSUM;
2582 if (DEFAULT_RX_CSUM_ENABLE)
2583 dev->net->features |= NETIF_F_RXCSUM;
2585 if (DEFAULT_TSO_CSUM_ENABLE)
2586 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2588 dev->net->hw_features = dev->net->features;
2590 /* Init all registers */
2591 ret = lan78xx_reset(dev);
2593 lan78xx_mdio_init(dev);
2595 dev->net->flags |= IFF_MULTICAST;
2597 pdata->wol = WAKE_MAGIC;
2602 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2604 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2606 lan78xx_remove_mdio(dev);
2609 netif_dbg(dev, ifdown, dev->net, "free pdata");
2616 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2617 struct sk_buff *skb,
2618 u32 rx_cmd_a, u32 rx_cmd_b)
2620 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2621 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2622 skb->ip_summed = CHECKSUM_NONE;
2624 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2625 skb->ip_summed = CHECKSUM_COMPLETE;
2629 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2633 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2634 skb_queue_tail(&dev->rxq_pause, skb);
2638 dev->net->stats.rx_packets++;
2639 dev->net->stats.rx_bytes += skb->len;
2641 skb->protocol = eth_type_trans(skb, dev->net);
2643 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2644 skb->len + sizeof(struct ethhdr), skb->protocol);
2645 memset(skb->cb, 0, sizeof(struct skb_data));
2647 if (skb_defer_rx_timestamp(skb))
2650 status = netif_rx(skb);
2651 if (status != NET_RX_SUCCESS)
2652 netif_dbg(dev, rx_err, dev->net,
2653 "netif_rx status %d\n", status);
/* lan78xx_rx - parse one bulk-in URB buffer that may hold several frames.
 * Each frame is preceded by three little-endian command words:
 * rx_cmd_a (32-bit: length + error/checksum flags), rx_cmd_b (32-bit:
 * hardware checksum), rx_cmd_c (16-bit). Frames are padded so the next
 * header is 32-bit aligned. The last frame reuses the URB skb; earlier
 * frames are skb_clone()d and delivered individually.
 * NOTE(review): several lines (returns, loop close) are missing from this
 * extract; the error/return paths cannot be fully verified here.
 */
2656 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2658 if (skb->len < dev->net->hard_header_len)
2661 while (skb->len > 0) {
2662 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2664 struct sk_buff *skb2;
2665 unsigned char *packet;
/* pull the three per-frame command words off the front */
2667 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2668 le32_to_cpus(&rx_cmd_a);
2669 skb_pull(skb, sizeof(rx_cmd_a));
2671 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2672 le32_to_cpus(&rx_cmd_b);
2673 skb_pull(skb, sizeof(rx_cmd_b));
2675 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2676 le16_to_cpus(&rx_cmd_c);
2677 skb_pull(skb, sizeof(rx_cmd_c));
2681 /* get the packet length */
2682 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
/* pad so the following frame header lands on a 4-byte boundary */
2683 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2685 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2686 netif_dbg(dev, rx_err, dev->net,
2687 "Error rx_cmd_a=0x%08x", rx_cmd_a);
2689 /* last frame in this batch */
2690 if (skb->len == size) {
2691 lan78xx_rx_csum_offload(dev, skb,
2692 rx_cmd_a, rx_cmd_b);
2694 skb_trim(skb, skb->len - 4); /* remove fcs */
2695 skb->truesize = size + sizeof(struct sk_buff);
/* not the last frame: clone and carve this frame out of the buffer */
2700 skb2 = skb_clone(skb, GFP_ATOMIC);
2701 if (unlikely(!skb2)) {
2702 netdev_warn(dev->net, "Error allocating skb");
2707 skb2->data = packet;
2708 skb_set_tail_pointer(skb2, size);
2710 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2712 skb_trim(skb2, skb2->len - 4); /* remove fcs */
2713 skb2->truesize = size + sizeof(struct sk_buff);
2715 lan78xx_skb_return(dev, skb2);
/* advance past this frame's payload ... */
2718 skb_pull(skb, size);
2720 /* padding bytes before the next frame starts */
2722 skb_pull(skb, align_count);
/* rx_process - run lan78xx_rx() on a completed URB skb; on parse failure
 * count an rx_error. NOTE(review): control flow is partially truncated
 * here — the lan78xx_skb_return()/drop/requeue-to-done ordering cannot be
 * fully confirmed from the visible lines.
 */
2728 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2730 if (!lan78xx_rx(dev, skb)) {
2731 dev->net->stats.rx_errors++;
2736 lan78xx_skb_return(dev, skb);
2740 netif_dbg(dev, rx_err, dev->net, "drop\n");
2741 dev->net->stats.rx_errors++;
/* recycle the skb onto the done list for cleanup by the bh tasklet */
2743 skb_queue_tail(&dev->done, skb);
2746 static void rx_complete(struct urb *urb);
/* rx_submit - allocate an rx skb, attach it to @urb and submit the bulk-in
 * transfer. Only submits while the netdev is present, running, not
 * RX-halted and not asleep; otherwise the skb is freed and (per the
 * visible error paths) an appropriate recovery action is taken:
 * -EPIPE defers an RX_HALT clear, -ENODEV detaches the device, other
 * errors reschedule the bh tasklet.
 */
2748 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
2750 struct sk_buff *skb;
2751 struct skb_data *entry;
2752 unsigned long lockflags;
2753 size_t size = dev->rx_urb_size;
2756 skb = netdev_alloc_skb_ip_align(dev->net, size);
2762 entry = (struct skb_data *)skb->cb;
2767 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
2768 skb->data, size, rx_complete, skb);
/* rxq.lock guards both the queue and the submit decision */
2770 spin_lock_irqsave(&dev->rxq.lock, lockflags);
2772 if (netif_device_present(dev->net) &&
2773 netif_running(dev->net) &&
2774 !test_bit(EVENT_RX_HALT, &dev->flags) &&
2775 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
2776 ret = usb_submit_urb(urb, GFP_ATOMIC);
2779 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
2782 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
2785 netif_dbg(dev, ifdown, dev->net, "device gone\n");
2786 netif_device_detach(dev->net);
2792 netif_dbg(dev, rx_err, dev->net,
2793 "rx submit, %d\n", ret);
2794 tasklet_schedule(&dev->bh);
2797 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
2800 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
/* skb not handed to the URB (or submit failed): release it here */
2802 dev_kfree_skb_any(skb);
/* rx_complete - bulk-in URB completion handler.
 * Classifies urb->status: success with a too-short payload counts length
 * errors; -EPIPE defers an endpoint-halt clear; async unlink/shutdown
 * codes just log; transient errors (overrun etc.) bump counters. The skb
 * is then moved to the done list via defer_bh(), and the URB is resubmitted
 * immediately when the interface is still up and not halted/unlinking.
 */
2808 static void rx_complete(struct urb *urb)
2810 struct sk_buff *skb = (struct sk_buff *)urb->context;
2811 struct skb_data *entry = (struct skb_data *)skb->cb;
2812 struct lan78xx_net *dev = entry->dev;
2813 int urb_status = urb->status;
2814 enum skb_state state;
2816 skb_put(skb, urb->actual_length);
2820 switch (urb_status) {
2822 if (skb->len < dev->net->hard_header_len) {
2824 dev->net->stats.rx_errors++;
2825 dev->net->stats.rx_length_errors++;
2826 netif_dbg(dev, rx_err, dev->net,
2827 "rx length %d\n", skb->len);
2829 usb_mark_last_busy(dev->udev);
/* stall: recover by clearing the halt from process context */
2832 dev->net->stats.rx_errors++;
2833 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
2835 case -ECONNRESET: /* async unlink */
2836 case -ESHUTDOWN: /* hardware gone */
2837 netif_dbg(dev, ifdown, dev->net,
2838 "rx shutdown, code %d\n", urb_status);
2846 dev->net->stats.rx_errors++;
2852 /* data overrun ... flush fifo? */
2854 dev->net->stats.rx_over_errors++;
2859 dev->net->stats.rx_errors++;
2860 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
2864 state = defer_bh(dev, skb, &dev->rxq, state);
2867 if (netif_running(dev->net) &&
2868 !test_bit(EVENT_RX_HALT, &dev->flags) &&
2869 state != unlink_start) {
2870 rx_submit(dev, urb, GFP_ATOMIC);
2875 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
/* lan78xx_tx_bh - bottom-half TX path.
 * Batches pending skbs from txq_pend into a single bulk-out URB:
 * - a GSO skb is sent on its own (previous packets handled first);
 * - otherwise packets are accumulated (each rounded up to a u32 boundary)
 *   until adding the next would exceed MAX_SINGLE_PACKET_SIZE;
 * - the batch is memcpy'd into one freshly allocated skb.
 * Submission honors autosuspend: when the device is asleep the URB is
 * anchored on dev->deferred and replayed in resume. -EPIPE defers a
 * TX_HALT clear; other submit errors drop the batch.
 * NOTE(review): several branch/exit lines are elided in this extract, so
 * the exact error unwinding cannot be verified from here.
 */
2878 static void lan78xx_tx_bh(struct lan78xx_net *dev)
2881 struct urb *urb = NULL;
2882 struct skb_data *entry;
2883 unsigned long flags;
2884 struct sk_buff_head *tqp = &dev->txq_pend;
2885 struct sk_buff *skb, *skb2;
2888 int skb_totallen, pkt_cnt;
/* phase 1: under tqp->lock, decide how many packets to coalesce */
2894 spin_lock_irqsave(&tqp->lock, flags);
2895 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
2896 if (skb_is_gso(skb)) {
2898 /* handle previous packets first */
2902 length = skb->len - TX_OVERHEAD;
2903 __skb_unlink(skb, tqp);
2904 spin_unlock_irqrestore(&tqp->lock, flags);
2908 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
2910 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
2913 spin_unlock_irqrestore(&tqp->lock, flags);
2915 /* copy to a single skb */
2916 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
2920 skb_put(skb, skb_totallen);
/* phase 2: dequeue and pack the chosen packets back-to-back (u32 aligned) */
2922 for (count = pos = 0; count < pkt_cnt; count++) {
2923 skb2 = skb_dequeue(tqp);
2925 length += (skb2->len - TX_OVERHEAD);
2926 memcpy(skb->data + pos, skb2->data, skb2->len);
2927 pos += roundup(skb2->len, sizeof(u32));
2928 dev_kfree_skb(skb2);
2933 urb = usb_alloc_urb(0, GFP_ATOMIC);
2937 entry = (struct skb_data *)skb->cb;
2940 entry->length = length;
2941 entry->num_of_packet = count;
2943 spin_lock_irqsave(&dev->txq.lock, flags);
2944 ret = usb_autopm_get_interface_async(dev->intf);
2946 spin_unlock_irqrestore(&dev->txq.lock, flags);
2950 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
2951 skb->data, skb->len, tx_complete, skb);
2953 if (length % dev->maxpacket == 0) {
2954 /* send USB_ZERO_PACKET */
2955 urb->transfer_flags |= URB_ZERO_PACKET;
2959 /* if this triggers the device is still a sleep */
2960 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
2961 /* transmission will be done in resume */
2962 usb_anchor_urb(urb, &dev->deferred);
2963 /* no use to process more packets */
2964 netif_stop_queue(dev->net);
2966 spin_unlock_irqrestore(&dev->txq.lock, flags);
2967 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
2972 ret = usb_submit_urb(urb, GFP_ATOMIC);
2975 netif_trans_update(dev->net);
2976 lan78xx_queue_skb(&dev->txq, skb, tx_start);
/* throttle the stack once the in-flight queue is full */
2977 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
2978 netif_stop_queue(dev->net);
2981 netif_stop_queue(dev->net);
2982 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2983 usb_autopm_put_interface_async(dev->intf);
2986 usb_autopm_put_interface_async(dev->intf);
2987 netif_dbg(dev, tx_err, dev->net,
2988 "tx: submit urb err %d\n", ret);
2992 spin_unlock_irqrestore(&dev->txq.lock, flags);
2995 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
2997 dev->net->stats.tx_dropped++;
2999 dev_kfree_skb_any(skb);
3002 netif_dbg(dev, tx_queued, dev->net,
3003 "> tx, len %d, type 0x%x\n", length, skb->protocol);
/* lan78xx_rx_bh - bottom-half RX refill.
 * Tops up the in-flight RX URB pool (at most 10 new submissions per pass)
 * until rx_qlen URBs are outstanding; -ENOLINK from rx_submit aborts the
 * refill. Reschedules the tasklet if still short, and wakes the TX queue
 * when there is room for more transmissions.
 */
3006 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3011 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3012 for (i = 0; i < 10; i++) {
3013 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3015 urb = usb_alloc_urb(0, GFP_ATOMIC);
3017 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3021 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3022 tasklet_schedule(&dev->bh);
3024 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3025 netif_wake_queue(dev->net);
/* lan78xx_bh - main tasklet: drain dev->done, dispatching each skb by its
 * skb_data state (rx_done -> process frame; cleanup states free the URB),
 * then, if the device is up, rearm the stats timer (resetting an enlarged
 * delta back toward the base interval), kick TX for pending packets, and
 * refill RX unless a delay timer is pending or RX is halted.
 * NOTE(review): case labels and some calls are elided in this extract.
 */
3028 static void lan78xx_bh(unsigned long param)
3030 struct lan78xx_net *dev = (struct lan78xx_net *)param;
3031 struct sk_buff *skb;
3032 struct skb_data *entry;
3034 while ((skb = skb_dequeue(&dev->done))) {
3035 entry = (struct skb_data *)(skb->cb);
3036 switch (entry->state) {
3038 entry->state = rx_cleanup;
3039 rx_process(dev, skb);
3042 usb_free_urb(entry->urb);
3046 usb_free_urb(entry->urb);
3050 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3055 if (netif_device_present(dev->net) && netif_running(dev->net)) {
3056 /* reset update timer delta */
3057 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3059 mod_timer(&dev->stat_monitor,
3060 jiffies + STAT_UPDATE_TIMER);
3063 if (!skb_queue_empty(&dev->txq_pend))
3066 if (!timer_pending(&dev->delay) &&
3067 !test_bit(EVENT_RX_HALT, &dev->flags))
/* lan78xx_delayedwork - process-context handler for deferred events
 * (scheduled via lan78xx_defer_kevent). Handles, in order:
 * - EVENT_TX_HALT: unlink tx URBs, clear the bulk-out endpoint halt,
 *   then wake the queue (unless the device is gone);
 * - EVENT_RX_HALT: same for the bulk-in endpoint, then reschedule the bh;
 * - EVENT_LINK_RESET: re-run link bring-up;
 * - EVENT_STAT_UPDATE: refresh hw stats and rearm the stat timer with an
 *   exponentially growing delta (capped at 50 intervals).
 * Each hardware access is bracketed by usb_autopm_get/put_interface.
 */
3072 static void lan78xx_delayedwork(struct work_struct *work)
3075 struct lan78xx_net *dev;
3077 dev = container_of(work, struct lan78xx_net, wq.work);
3079 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3080 unlink_urbs(dev, &dev->txq);
3081 status = usb_autopm_get_interface(dev->intf);
3084 status = usb_clear_halt(dev->udev, dev->pipe_out);
3085 usb_autopm_put_interface(dev->intf);
3088 status != -ESHUTDOWN) {
3089 if (netif_msg_tx_err(dev))
3091 netdev_err(dev->net,
3092 "can't clear tx halt, status %d\n",
3095 clear_bit(EVENT_TX_HALT, &dev->flags);
3096 if (status != -ESHUTDOWN)
3097 netif_wake_queue(dev->net);
3100 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3101 unlink_urbs(dev, &dev->rxq);
3102 status = usb_autopm_get_interface(dev->intf);
3105 status = usb_clear_halt(dev->udev, dev->pipe_in);
3106 usb_autopm_put_interface(dev->intf);
3109 status != -ESHUTDOWN) {
3110 if (netif_msg_rx_err(dev))
3112 netdev_err(dev->net,
3113 "can't clear rx halt, status %d\n",
3116 clear_bit(EVENT_RX_HALT, &dev->flags);
3117 tasklet_schedule(&dev->bh);
3121 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3124 clear_bit(EVENT_LINK_RESET, &dev->flags);
3125 status = usb_autopm_get_interface(dev->intf);
3128 if (lan78xx_link_reset(dev) < 0) {
3129 usb_autopm_put_interface(dev->intf);
3131 netdev_info(dev->net, "link reset failed (%d)\n",
3134 usb_autopm_put_interface(dev->intf);
3138 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3139 lan78xx_update_stats(dev);
3141 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3143 mod_timer(&dev->stat_monitor,
3144 jiffies + (STAT_UPDATE_TIMER * dev->delta));
/* back off stat polling exponentially, capped at 50 intervals */
3146 dev->delta = min((dev->delta * 2), 50);
/* intr_complete - interrupt-endpoint URB completion handler.
 * On success, decode the status words via lan78xx_status(); on
 * -ENOENT/-ESHUTDOWN the URB was killed or the device is gone, so just
 * log. Other errors are logged without throttling (the endpoint polls
 * slowly anyway). If the interface is still running, zero the transfer
 * buffer and resubmit the URB.
 */
3150 static void intr_complete(struct urb *urb)
3152 struct lan78xx_net *dev = urb->context;
3153 int status = urb->status;
3158 lan78xx_status(dev, urb);
3161 /* software-driven interface shutdown */
3162 case -ENOENT: /* urb killed */
3163 case -ESHUTDOWN: /* hardware gone */
3164 netif_dbg(dev, ifdown, dev->net,
3165 "intr shutdown, code %d\n", status);
3168 /* NOTE: not throttling like RX/TX, since this endpoint
3169 * already polls infrequently
3172 netdev_dbg(dev->net, "intr status %d\n", status);
3176 if (!netif_running(dev->net))
3179 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3180 status = usb_submit_urb(urb, GFP_ATOMIC);
3182 netif_err(dev, timer, dev->net,
3183 "intr resubmit --> %d\n", status);
/* lan78xx_disconnect - USB disconnect callback.
 * Tears down in safe order: detach intfdata, unregister the netdev,
 * cancel the deferred-work item, scuttle any suspended/deferred URBs,
 * unbind driver state, then kill and free the interrupt URB.
 * NOTE(review): the trailing free_netdev/usb_put_dev lines are not
 * visible in this extract.
 */
3186 static void lan78xx_disconnect(struct usb_interface *intf)
3188 struct lan78xx_net *dev;
3189 struct usb_device *udev;
3190 struct net_device *net;
3192 dev = usb_get_intfdata(intf);
3193 usb_set_intfdata(intf, NULL);
3197 udev = interface_to_usbdev(intf);
3200 unregister_netdev(net);
3202 cancel_delayed_work_sync(&dev->wq);
3204 usb_scuttle_anchored_urbs(&dev->deferred);
3206 lan78xx_unbind(dev, intf);
3208 usb_kill_urb(dev->urb_intr);
3209 usb_free_urb(dev->urb_intr);
/* lan78xx_tx_timeout - netdev watchdog callback: unlink all in-flight TX
 * URBs and reschedule the bottom-half tasklet to restart transmission.
 */
3215 static void lan78xx_tx_timeout(struct net_device *net)
3217 struct lan78xx_net *dev = netdev_priv(net);
3219 unlink_urbs(dev, &dev->txq);
3220 tasklet_schedule(&dev->bh);
/* lan78xx_features_check - per-skb feature mask adjustment.
 * Disables GSO for frames whose payload plus the TX command-word overhead
 * would exceed the device's single-packet limit, then applies the generic
 * VLAN and VXLAN feature restrictions.
 */
3223 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3224 struct net_device *netdev,
3225 netdev_features_t features)
3227 if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3228 features &= ~NETIF_F_GSO_MASK;
3230 features = vlan_features_check(skb, features);
3231 features = vxlan_features_check(skb, features);
/* Net-device operations table wiring the lan78xx callbacks into the
 * generic netdev layer (open/stop, xmit, timeout, MTU/MAC changes,
 * ioctl, rx-mode, feature toggles, VLAN filter add/remove, and the
 * per-skb features check above).
 */
3236 static const struct net_device_ops lan78xx_netdev_ops = {
3237 .ndo_open = lan78xx_open,
3238 .ndo_stop = lan78xx_stop,
3239 .ndo_start_xmit = lan78xx_start_xmit,
3240 .ndo_tx_timeout = lan78xx_tx_timeout,
3241 .ndo_change_mtu = lan78xx_change_mtu,
3242 .ndo_set_mac_address = lan78xx_set_mac_addr,
3243 .ndo_validate_addr = eth_validate_addr,
3244 .ndo_do_ioctl = lan78xx_ioctl,
3245 .ndo_set_rx_mode = lan78xx_set_multicast,
3246 .ndo_set_features = lan78xx_set_features,
3247 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3248 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
3249 .ndo_features_check = lan78xx_features_check,
/* lan78xx_stat_monitor - stat_monitor timer callback: defers the actual
 * (USB-register-reading) stats update to process context via kevent.
 */
3252 static void lan78xx_stat_monitor(unsigned long param)
3254 struct lan78xx_net *dev;
3256 dev = (struct lan78xx_net *)param;
3258 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
/* lan78xx_probe - USB probe: allocate and register the network device.
 * Steps visible here: take a reference on the usb_device, allocate the
 * etherdev, init queues/locks/tasklet/work/anchor, set netdev ops and
 * the stats timer, validate the three expected endpoints (bulk-in,
 * bulk-out, interrupt-in), bind driver state, size the MTU against the
 * hardware limit, set up the interrupt URB, reject a zero bulk-out
 * maxpacket (broken descriptor), enable remote wakeup, register the
 * netdev and configure autosuspend.
 * NOTE(review): error-unwind labels and return statements are elided in
 * this extract; the cleanup ordering cannot be verified from here.
 */
3261 static int lan78xx_probe(struct usb_interface *intf,
3262 const struct usb_device_id *id)
3264 struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
3265 struct lan78xx_net *dev;
3266 struct net_device *netdev;
3267 struct usb_device *udev;
3273 udev = interface_to_usbdev(intf);
3274 udev = usb_get_dev(udev);
3277 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3279 dev_err(&intf->dev, "Error: OOM\n");
3283 /* netdev_printk() needs this */
3284 SET_NETDEV_DEV(netdev, &intf->dev);
3286 dev = netdev_priv(netdev);
3290 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3291 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3293 skb_queue_head_init(&dev->rxq);
3294 skb_queue_head_init(&dev->txq);
3295 skb_queue_head_init(&dev->done);
3296 skb_queue_head_init(&dev->rxq_pause);
3297 skb_queue_head_init(&dev->txq_pend);
3298 mutex_init(&dev->phy_mutex);
3300 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3301 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3302 init_usb_anchor(&dev->deferred);
3304 netdev->netdev_ops = &lan78xx_netdev_ops;
3305 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3306 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3308 dev->stat_monitor.function = lan78xx_stat_monitor;
3309 dev->stat_monitor.data = (unsigned long)dev;
3311 init_timer(&dev->stat_monitor);
3313 mutex_init(&dev->stats.access_lock);
/* expect exactly bulk-in, bulk-out and interrupt-in endpoints */
3315 if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
3320 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3321 ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
3322 if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
3327 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3328 ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
3329 if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
3334 ep_intr = &intf->cur_altsetting->endpoint[2];
3335 if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
3340 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3341 usb_endpoint_num(&ep_intr->desc));
3343 ret = lan78xx_bind(dev, intf);
3346 strcpy(netdev->name, "eth%d");
3348 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3349 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3350 netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
3352 period = ep_intr->desc.bInterval;
3353 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3354 buf = kmalloc(maxp, GFP_KERNEL);
3356 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3357 if (!dev->urb_intr) {
3362 usb_fill_int_urb(dev->urb_intr, dev->udev,
3363 dev->pipe_intr, buf, maxp,
3364 intr_complete, dev, period);
/* URB core frees `buf` when the URB is released */
3365 dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
3369 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3371 /* Reject broken descriptors. */
3372 if (dev->maxpacket == 0) {
3377 /* driver requires remote-wakeup capability during autosuspend. */
3378 intf->needs_remote_wakeup = 1;
3380 ret = register_netdev(netdev);
3382 netif_err(dev, probe, netdev, "couldn't register the device\n");
3386 usb_set_intfdata(intf, dev);
3388 ret = device_set_wakeup_enable(&udev->dev, true);
3390 /* Default delay of 2sec has more overhead than advantage.
3391 * Set to 10sec as default.
3393 pm_runtime_set_autosuspend_delay(&udev->dev,
3394 DEFAULT_AUTOSUSPEND_DELAY);
3399 lan78xx_unbind(dev, intf);
3401 free_netdev(netdev);
/* lan78xx_wakeframe_crc16 - bitwise CRC-16 (polynomial 0x8005) over @buf,
 * used to program wake-frame filter CRCs. NOTE(review): the shift/xor
 * core of the loop is elided in this extract; only the MSB-feedback
 * branch is visible.
 */
3408 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3410 const u16 crc16poly = 0x8005;
3416 for (i = 0; i < len; i++) {
3418 for (bit = 0; bit < 8; bit++) {
3422 if (msb ^ (u16)(data & 1)) {
3424 crc |= (u16)0x0001U;
/* lan78xx_set_suspend - program wake-on-LAN before system suspend.
 * Disables TX/RX, clears wake status/source registers and all wake-frame
 * filters, then for each requested WOL mode (@wol is a WAKE_* bitmask)
 * sets the matching WUCSR enable bit and, for MCAST/ARP, programs CRC16
 * wake-frame filters (IPv4 multicast 01:00:5E, IPv6 multicast 33:33,
 * ethertype 0x0806). Each mode also selects a PMT_CTL suspend mode
 * (SUS_MODE_3 for magic packet, SUS_MODE_0 otherwise; SUS_MODE_0 wins
 * when multiple WOL bits are set). Finally clears wake-up status and
 * re-enables the receiver so wake frames can be seen.
 */
3433 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3441 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3442 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3443 const u8 arp_type[2] = { 0x08, 0x06 };
/* quiesce the MAC before reprogramming wake logic */
3445 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3446 buf &= ~MAC_TX_TXEN_;
3447 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3448 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3449 buf &= ~MAC_RX_RXEN_;
3450 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3452 ret = lan78xx_write_reg(dev, WUCSR, 0);
3453 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3454 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3459 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3460 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3461 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
/* wipe all wake-frame filter configs before installing new ones */
3463 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3464 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3467 if (wol & WAKE_PHY) {
3468 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3470 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3471 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3472 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3474 if (wol & WAKE_MAGIC) {
3475 temp_wucsr |= WUCSR_MPEN_;
3477 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3478 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3479 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3481 if (wol & WAKE_BCAST) {
3482 temp_wucsr |= WUCSR_BCST_EN_;
3484 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3485 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3486 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3488 if (wol & WAKE_MCAST) {
3489 temp_wucsr |= WUCSR_WAKE_EN_;
3491 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3492 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3493 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3495 WUF_CFGX_TYPE_MCAST_ |
3496 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3497 (crc & WUF_CFGX_CRC16_MASK_));
/* mask 0x7 = match first 3 bytes of the destination address */
3499 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3500 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3501 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3502 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3505 /* for IPv6 Multicast */
3506 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3507 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3509 WUF_CFGX_TYPE_MCAST_ |
3510 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3511 (crc & WUF_CFGX_CRC16_MASK_));
3513 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3514 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3515 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3516 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3519 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3520 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3521 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3523 if (wol & WAKE_UCAST) {
3524 temp_wucsr |= WUCSR_PFDA_EN_;
3526 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3527 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3528 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3530 if (wol & WAKE_ARP) {
3531 temp_wucsr |= WUCSR_WAKE_EN_;
3533 /* set WUF_CFG & WUF_MASK
3534 * for packettype (offset 12,13) = ARP (0x0806)
3536 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3537 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3539 WUF_CFGX_TYPE_ALL_ |
3540 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3541 (crc & WUF_CFGX_CRC16_MASK_));
/* mask 0x3000 = match bytes 12-13 (the ethertype field) */
3543 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3544 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3545 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3546 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3549 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3550 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3551 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3554 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3556 /* when multiple WOL bits are set */
3557 if (hweight_long((unsigned long)wol) > 1) {
3558 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3559 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3560 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3562 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
/* clear any stale wake-up status, then re-enable RX for wake frames */
3565 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3566 buf |= PMT_CTL_WUPS_MASK_;
3567 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3569 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3570 buf |= MAC_RX_RXEN_;
3571 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* lan78xx_suspend - USB suspend callback (system sleep and autosuspend).
 * On the first suspend: refuse autosuspend while TX is in flight, mark
 * the device asleep, stop the MAC, detach the netdev and kill all URBs
 * (including the interrupt URB). If the device was open, also stop the
 * stats timer and either arm "good frame" wake (autosuspend path,
 * SUS_MODE_3 + PHY wake) or full WOL via lan78xx_set_suspend (system
 * sleep path, using the user-configured pdata->wol).
 * NOTE(review): return statements are elided in this extract.
 */
3576 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3578 struct lan78xx_net *dev = usb_get_intfdata(intf);
3579 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3584 event = message.event;
3586 if (!dev->suspend_count++) {
3587 spin_lock_irq(&dev->txq.lock);
3588 /* don't autosuspend while transmitting */
3589 if ((skb_queue_len(&dev->txq) ||
3590 skb_queue_len(&dev->txq_pend)) &&
3591 PMSG_IS_AUTO(message)) {
3592 spin_unlock_irq(&dev->txq.lock);
3596 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3597 spin_unlock_irq(&dev->txq.lock);
/* stop MAC TX and RX before tearing down the URB machinery */
3601 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3602 buf &= ~MAC_TX_TXEN_;
3603 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3604 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3605 buf &= ~MAC_RX_RXEN_;
3606 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3608 /* empty out the rx and queues */
3609 netif_device_detach(dev->net);
3610 lan78xx_terminate_urbs(dev);
3611 usb_kill_urb(dev->urb_intr);
3614 netif_device_attach(dev->net);
3617 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3618 del_timer(&dev->stat_monitor);
3620 if (PMSG_IS_AUTO(message)) {
3621 /* auto suspend (selective suspend) */
3622 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3623 buf &= ~MAC_TX_TXEN_;
3624 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3625 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3626 buf &= ~MAC_RX_RXEN_;
3627 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3629 ret = lan78xx_write_reg(dev, WUCSR, 0);
3630 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3631 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3633 /* set goodframe wakeup */
3634 ret = lan78xx_read_reg(dev, WUCSR, &buf);
3636 buf |= WUCSR_RFE_WAKE_EN_;
3637 buf |= WUCSR_STORE_WAKE_;
3639 ret = lan78xx_write_reg(dev, WUCSR, buf);
3641 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3643 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3644 buf |= PMT_CTL_RES_CLR_WKP_STS_;
3646 buf |= PMT_CTL_PHY_WAKE_EN_;
3647 buf |= PMT_CTL_WOL_EN_;
3648 buf &= ~PMT_CTL_SUS_MODE_MASK_;
3649 buf |= PMT_CTL_SUS_MODE_3_;
3651 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3653 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3655 buf |= PMT_CTL_WUPS_MASK_;
3657 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* re-enable RX so the device can see wake frames while asleep */
3659 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3660 buf |= MAC_RX_RXEN_;
3661 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3663 lan78xx_set_suspend(dev, pdata->wol);
/* lan78xx_resume - USB resume callback.
 * Rearms the stats timer if needed; on the final resume (suspend_count
 * hits zero) restarts the interrupt URB, replays URBs that were anchored
 * on dev->deferred during autosuspend (dropping their skbs on submit
 * failure), clears the ASLEEP flag and restarts the TX queue. Finally
 * clears all wake-source state in WUCSR/WUCSR2/WK_SRC, writes the
 * "received wake event" acknowledge bits, and re-enables MAC TX.
 * NOTE(review): several error-path lines are elided in this extract.
 */
3672 static int lan78xx_resume(struct usb_interface *intf)
3674 struct lan78xx_net *dev = usb_get_intfdata(intf);
3675 struct sk_buff *skb;
3680 if (!timer_pending(&dev->stat_monitor)) {
3682 mod_timer(&dev->stat_monitor,
3683 jiffies + STAT_UPDATE_TIMER);
3686 if (!--dev->suspend_count) {
3687 /* resume interrupt URBs */
3688 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
3689 usb_submit_urb(dev->urb_intr, GFP_NOIO);
/* replay TX URBs deferred while the device was autosuspended */
3691 spin_lock_irq(&dev->txq.lock);
3692 while ((res = usb_get_from_anchor(&dev->deferred))) {
3693 skb = (struct sk_buff *)res->context;
3694 ret = usb_submit_urb(res, GFP_ATOMIC);
3696 dev_kfree_skb_any(skb);
3698 usb_autopm_put_interface_async(dev->intf);
3700 netif_trans_update(dev->net);
3701 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3705 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3706 spin_unlock_irq(&dev->txq.lock);
3708 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
3709 if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
3710 netif_start_queue(dev->net);
3711 tasklet_schedule(&dev->bh);
3715 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3716 ret = lan78xx_write_reg(dev, WUCSR, 0);
3717 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3719 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
3721 WUCSR2_IPV6_TCPSYN_RCD_ |
3722 WUCSR2_IPV4_TCPSYN_RCD_);
3724 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
3725 WUCSR_EEE_RX_WAKE_ |
3727 WUCSR_RFE_WAKE_FR_ |
3732 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3733 buf |= MAC_TX_TXEN_;
3734 ret = lan78xx_write_reg(dev, MAC_TX, buf);
/* lan78xx_reset_resume - resume after a device reset: re-initialize the
 * PHY (hardware state was lost) before running the normal resume path.
 */
3739 static int lan78xx_reset_resume(struct usb_interface *intf)
3741 struct lan78xx_net *dev = usb_get_intfdata(intf);
3745 lan78xx_phy_init(dev);
3747 return lan78xx_resume(intf);
/* USB vendor/product IDs this driver binds to: LAN7800 and LAN7850. */
3750 static const struct usb_device_id products[] = {
3752 /* LAN7800 USB Gigabit Ethernet Device */
3753 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
3756 /* LAN7850 USB Gigabit Ethernet Device */
3757 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
3761 MODULE_DEVICE_TABLE(usb, products);
/* USB driver descriptor: supports autosuspend and disables hub-initiated
 * link power management (the device needs remote wakeup to work reliably).
 */
3763 static struct usb_driver lan78xx_driver = {
3764 .name = DRIVER_NAME,
3765 .id_table = products,
3766 .probe = lan78xx_probe,
3767 .disconnect = lan78xx_disconnect,
3768 .suspend = lan78xx_suspend,
3769 .resume = lan78xx_resume,
3770 .reset_resume = lan78xx_reset_resume,
3771 .supports_autosuspend = 1,
3772 .disable_hub_initiated_lpm = 1,
3775 module_usb_driver(lan78xx_driver);
3777 MODULE_AUTHOR(DRIVER_AUTHOR);
3778 MODULE_DESCRIPTION(DRIVER_DESC);
3779 MODULE_LICENSE("GPL");